nvme fixes for Linux 6.13
- Fix device specific quirk for PRP list alignment (Robert)
- Fix target name overflow (Leo)
- Fix target write granularity (Luis)
- Fix target sleeping in atomic context (Nilay)
- Remove unnecessary tcp queue teardown (Chunguang)

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE3Fbyvv+648XNRdHTPe3zGtjzRgkFAmd0K5cACgkQPe3zGtjz
Rgm5jQ/9EdJb8y3eFetsAs7P5JyeZKdOWOBLgm1fdP5kPeQrbPgOqtXag2JQbdLY
rFI7fm9wTsSnrz+v+4iRTwS0MPd/WTJw9cA+lTFlFBoc871Obg+aniiswxW+lnl2
1KLzVSRFU3LbSSSBCNi+op+MVIgbVmiLZq+mKI7JqG4WrDXumEUSlNV4gLtDPiJo
Z8fYoEtZIgsfmm9p8ySs6nmyrqsyM567ISoaxhAcisfAIXz20ul3fHkeLI4wa0xD
gQPWdrz9Yz5aijr40FuiFBwKHU1Zg/vlqTl8o5gGZvKsx/epJpnQNoDTagmmYR8u
oGU+c1R9LHVnxJjDitP6uyseafCYBJVfCdZoVXFdcDvc9aY2Pe2Sgo5y9IZGuLlP
Vis7PHN/vFpTF1SRVFDALxXSkGR67zYSVB58CpWyxIFv3y1B212yicvrPfT5xcNE
SbZglQd9qVaGuzXwKKHf80NOucEgagxYluCDKBOXCZj+u0S92ZAHuWv88WvzIFdL
oK/GmFNxE3hAUfENf6FJ8Rfzx9+a+bN+QpaI1SCGFAS7dsQ8qlOBDPfQnYT8Q2T7
yN1LHvnBEqx59/yCSN0FdWTpTEne8TgGJkj+EZN9sblbJaSfOr3f5rQbz59SXHHh
Zap5KIlaPsPdukcWwrZp34meq3BpUuP3ELPxxcdINS4busOLP98=
=SspQ
-----END PGP SIGNATURE-----

Merge tag 'nvme-6.13-2024-12-31' of git://git.infradead.org/nvme into block-6.13

Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.13

 - Fix device specific quirk for PRP list alignment (Robert)
 - Fix target name overflow (Leo)
 - Fix target write granularity (Luis)
 - Fix target sleeping in atomic context (Nilay)
 - Remove unnecessary tcp queue teardown (Chunguang)"

* tag 'nvme-6.13-2024-12-31' of git://git.infradead.org/nvme:
  nvme-tcp: remove nvme_tcp_destroy_io_queues()
  nvmet-loop: avoid using mutex in IO hotpath
  nvmet: propagate npwg topology
  nvmet: Don't overflow subsysnqn
  nvme-pci: 512 byte aligned dma pool segment quirk
commit cc0331e29f
9 changed files with 108 additions and 81 deletions
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -173,6 +173,11 @@ enum nvme_quirks {
	 * MSI (but not MSI-X) interrupts are broken and never fire.
	 */
	NVME_QUIRK_BROKEN_MSI			= (1 << 21),
+
+	/*
+	 * Align dma pool segment size to 512 bytes
+	 */
+	NVME_QUIRK_DMAPOOL_ALIGN_512		= (1 << 22),
 };

 /*
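Each quirk is a distinct bit so that several can be OR'd into a single driver_data word per PCI ID (the Silicon Motion entry below combines two). The new NVME_QUIRK_DMAPOOL_ALIGN_512 bit is consumed in nvme_setup_prp_pools() and attached to the O2 Micro Steam Deck device ID, both in the pci.c hunks that follow.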
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
@@ -2834,15 +2834,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)

 static int nvme_setup_prp_pools(struct nvme_dev *dev)
 {
+	size_t small_align = 256;
+
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						NVME_CTRL_PAGE_SIZE,
						NVME_CTRL_PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

+	if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
+		small_align = 512;
+
	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
-						256, 256, 0);
+						256, small_align, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
@@ -3607,7 +3612,7 @@ static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
-		.driver_data = NVME_QUIRK_QDEPTH_ONE },
+		.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
				NVME_QUIRK_BOGUS_NID, },
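For readers unfamiliar with the API: dma_pool_create()'s third and fourth arguments are the segment size and the minimum alignment of every segment later handed out by dma_pool_alloc(), so the patch widens the alignment without growing the 256-byte segments. A minimal sketch of the calls involved (hypothetical names, error paths trimmed; an illustration of the documented API, not the driver code itself):

	#include <linux/dmapool.h>

	static int demo_setup(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *seg;

		/* 256-byte segments, each aligned to 512 bytes, no boundary limit */
		pool = dma_pool_create("demo pool", dev, 256, 512, 0);
		if (!pool)
			return -ENOMEM;

		seg = dma_pool_alloc(pool, GFP_KERNEL, &dma);	/* dma % 512 == 0 */
		if (seg)
			dma_pool_free(pool, seg, dma);
		dma_pool_destroy(pool);
		return 0;
	}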
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
@@ -2024,14 +2024,6 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
	return __nvme_tcp_alloc_io_queues(ctrl);
 }

-static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
-{
-	nvme_tcp_stop_io_queues(ctrl);
-	if (remove)
-		nvme_remove_io_tag_set(ctrl);
-	nvme_tcp_free_io_queues(ctrl);
-}
-
 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 {
	int ret, nr_queues;
@@ -2176,9 +2168,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	nvme_cancel_tagset(ctrl);
-	if (remove)
+	if (remove) {
		nvme_unquiesce_io_queues(ctrl);
-	nvme_tcp_destroy_io_queues(ctrl, remove);
+		nvme_remove_io_tag_set(ctrl);
+	}
+	nvme_tcp_free_io_queues(ctrl);
 }

 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
@@ -2267,7 +2261,9 @@ destroy_io:
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	nvme_cancel_tagset(ctrl);
-	nvme_tcp_destroy_io_queues(ctrl, new);
+	if (new)
+		nvme_remove_io_tag_set(ctrl);
+	nvme_tcp_free_io_queues(ctrl);
 }
 destroy_admin:
	nvme_stop_keep_alive(ctrl);
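Both callers already stop the I/O queues before tearing them down, so the helper's extra nvme_tcp_stop_io_queues() call was redundant work; the patch deletes the helper and open-codes the two remaining steps at each call site. Reconstructed from the hunks above, the teardown path now reads (earlier lines of the function elided):

	static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, bool remove)
	{
		/* ... earlier teardown steps elided ... */
		nvme_sync_io_queues(ctrl);
		nvme_tcp_stop_io_queues(ctrl);
		nvme_cancel_tagset(ctrl);
		if (remove) {
			nvme_unquiesce_io_queues(ctrl);
			nvme_remove_io_tag_set(ctrl);
		}
		nvme_tcp_free_io_queues(ctrl);
	}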
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
@@ -139,7 +139,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
	unsigned long idx;

	ctrl = req->sq->ctrl;
-	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
@@ -331,9 +331,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
-		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+		nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
+		}
	}

	desc->grpid = cpu_to_le32(grpid);
@@ -772,7 +773,7 @@ static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
		goto out;
	}

-	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_endgid)
			continue;

@@ -815,7 +816,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
		goto out;
	}

-	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		if (match_css && req->ns->csi != req->cmd->identify.csi)
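With this series, namespaces stay in the subsystem XArray from allocation to free rather than from enable to disable, so every log-page and identify path that reports namespaces must now filter on the enabled state. That is what the switch from xa_for_each() to nvmet_for_each_enabled_ns() (defined in the nvmet.h hunk below) does in each hunk above.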
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
@@ -810,18 +810,6 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
	NULL,
 };

-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
-{
-	struct config_item *ns_item;
-	char name[12];
-
-	snprintf(name, sizeof(name), "%u", nsid);
-	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
-	ns_item = config_group_find_item(&subsys->namespaces_group, name);
-	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
-	return ns_item != NULL;
-}
-
 static void nvmet_ns_release(struct config_item *item)
 {
	struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -2254,12 +2242,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
		const char *page, size_t count)
 {
	struct list_head *entry;
+	char *old_nqn, *new_nqn;
	size_t len;

	len = strcspn(page, "\n");
	if (!len || len > NVMF_NQN_FIELD_LEN - 1)
		return -EINVAL;

+	new_nqn = kstrndup(page, len, GFP_KERNEL);
+	if (!new_nqn)
+		return -ENOMEM;
+
	down_write(&nvmet_config_sem);
	list_for_each(entry, &nvmet_subsystems_group.cg_children) {
		struct config_item *item =
@@ -2268,13 +2261,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
		if (!strncmp(config_item_name(item), page, len)) {
			pr_err("duplicate NQN %s\n", config_item_name(item));
			up_write(&nvmet_config_sem);
+			kfree(new_nqn);
			return -EINVAL;
		}
	}
-	memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
-	memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+	old_nqn = nvmet_disc_subsys->subsysnqn;
+	nvmet_disc_subsys->subsysnqn = new_nqn;
	up_write(&nvmet_config_sem);

+	kfree(old_nqn);
	return len;
 }

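The overflow: subsysnqn is a heap-allocated string sized for the NQN it currently holds, but the old code memset/memcpy'd a full NVMF_NQN_FIELD_LEN bytes into it. The fix builds the replacement with kstrndup() and swaps pointers under nvmet_config_sem, freeing the old string only after the swap. A generic sketch of that swap-under-lock pattern (hypothetical names, not the nvmet code):

	#include <linux/slab.h>
	#include <linux/rwsem.h>

	/*
	 * Replace *slot with a copy of src[0..len). Readers of *slot must
	 * also hold sem (at least for read), so they see either the old or
	 * the new string, never a half-written buffer, and the old string
	 * can be freed safely once the write lock is dropped.
	 */
	static int replace_name(char **slot, struct rw_semaphore *sem,
				const char *src, size_t len)
	{
		char *old, *new = kstrndup(src, len, GFP_KERNEL);

		if (!new)
			return -ENOMEM;
		down_write(sem);
		old = *slot;
		*slot = new;
		up_write(sem);
		kfree(old);
		return 0;
	}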
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
@@ -127,7 +127,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
	unsigned long idx;
	u32 nsid = 0;

-	xa_for_each(&subsys->namespaces, idx, cur)
+	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
@@ -441,11 +441,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);

	req->ns = xa_load(&subsys->namespaces, nsid);
-	if (unlikely(!req->ns)) {
+	if (unlikely(!req->ns || !req->ns->enabled)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
-		if (nvmet_subsys_nsid_exists(subsys, nsid))
-			return NVME_SC_INTERNAL_PATH_ERROR;
-		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+		if (!req->ns) /* ns doesn't exist! */
+			return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+
+		/* ns exists but it's disabled */
+		req->ns = NULL;
+		return NVME_SC_INTERNAL_PATH_ERROR;
	}

	percpu_ref_get(&req->ns->ref);
@@ -583,8 +586,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
		goto out_unlock;

	ret = -EMFILE;
-	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
-		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
@@ -599,38 +600,19 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

-	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
-				0, GFP_KERNEL);
-	if (ret)
-		goto out_dev_put;
-
-	if (ns->nsid > subsys->max_nsid)
-		subsys->max_nsid = ns->nsid;
-
-	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
-	if (ret)
-		goto out_restore_subsys_maxnsid;
-
	if (ns->pr.enable) {
		ret = nvmet_pr_init_ns(ns);
		if (ret)
-			goto out_remove_from_subsys;
+			goto out_dev_put;
	}

-	subsys->nr_namespaces++;
-
	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
+	xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
	ret = 0;
 out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
-
-out_remove_from_subsys:
-	xa_erase(&subsys->namespaces, ns->nsid);
-out_restore_subsys_maxnsid:
-	subsys->max_nsid = nvmet_max_nsid(subsys);
-	percpu_ref_exit(&ns->ref);
 out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -649,15 +631,37 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
		goto out_unlock;

	ns->enabled = false;
-	xa_erase(&ns->subsys->namespaces, ns->nsid);
-	if (ns->nsid == subsys->max_nsid)
-		subsys->max_nsid = nvmet_max_nsid(subsys);
+	xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

+	if (ns->pr.enable)
+		nvmet_pr_exit_ns(ns);
+
+	mutex_lock(&subsys->lock);
+	nvmet_ns_changed(subsys, ns->nsid);
+	nvmet_ns_dev_disable(ns);
+out_unlock:
+	mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+	struct nvmet_subsys *subsys = ns->subsys;
+
+	nvmet_ns_disable(ns);
+
+	mutex_lock(&subsys->lock);
+
+	xa_erase(&subsys->namespaces, ns->nsid);
+	if (ns->nsid == subsys->max_nsid)
+		subsys->max_nsid = nvmet_max_nsid(subsys);
+
+	mutex_unlock(&subsys->lock);
+
	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
@@ -671,21 +675,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

-	if (ns->pr.enable)
-		nvmet_pr_exit_ns(ns);
-
	mutex_lock(&subsys->lock);
-
	subsys->nr_namespaces--;
-	nvmet_ns_changed(subsys, ns->nsid);
-	nvmet_ns_dev_disable(ns);
-out_unlock:
	mutex_unlock(&subsys->lock);
-}
-
-void nvmet_ns_free(struct nvmet_ns *ns)
-{
-	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
@@ -699,15 +691,33 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 {
	struct nvmet_ns *ns;

+	mutex_lock(&subsys->lock);
+
+	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+		goto out_unlock;
+
	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
-		return NULL;
+		goto out_unlock;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

+	if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+		goto out_free;
+
+	if (ns->nsid > subsys->max_nsid)
+		subsys->max_nsid = nsid;
+
+	if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL))
+		goto out_exit;
+
+	subsys->nr_namespaces++;
+
+	mutex_unlock(&subsys->lock);
+
	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
@@ -718,6 +728,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
	ns->csi = NVME_CSI_NVM;

	return ns;
+out_exit:
+	subsys->max_nsid = nvmet_max_nsid(subsys);
+	percpu_ref_exit(&ns->ref);
+out_free:
+	kfree(ns);
+out_unlock:
+	mutex_unlock(&subsys->lock);
+	return NULL;
 }

 static void nvmet_update_sq_head(struct nvmet_req *req)
@@ -1394,7 +1412,7 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,

	ctrl->p2p_client = get_device(req->p2p_client);

-	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }

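The point of the core.c changes: nvmet_req_find_ns() runs in the I/O hotpath, and for nvmet-loop that can be atomic context, so the old detour through nvmet_subsys_nsid_exists(), which takes a configfs mutex and may sleep, was a bug. Keeping disabled namespaces in the XArray with an enabled flag (plus the NVMET_NS_ENABLED mark for iteration) lets the lookup distinguish "no such NSID" from "namespace disabled" without taking any lock. Accordingly, the percpu_ref and xa_insert() setup moves from nvmet_ns_enable() to nvmet_ns_alloc(), and the matching teardown moves from nvmet_ns_disable() to nvmet_ns_free().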
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
	 */
	id->nsfeat |= 1 << 4;
	/* NPWG = Namespace Preferred Write Granularity. 0's based */
-	id->npwg = lpp0b;
+	id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
	/* NPWA = Namespace Preferred Write Alignment. 0's based */
	id->npwa = id->npwg;
	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
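bdev_io_min() reports the block layer's minimum preferred I/O size in bytes (for stacked devices such as RAID, the chunk size), while NPWG is expressed in logical blocks and is 0's based; to0based() is nvmet's existing clamp-and-subtract-one helper. A worked example, assuming a 4096-byte io_min on a device with 512-byte logical blocks:

	/*
	 * 4096 / 512 = 8 blocks; to0based(8) encodes 7, which the host
	 * decodes back as a preferred write granularity of 8 LBAs.
	 */
	id->npwg = to0based(4096 / 512);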
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
@@ -24,6 +24,7 @@

 #define NVMET_DEFAULT_VS	NVME_VS(2, 1, 0)

+#define NVMET_NS_ENABLED	XA_MARK_1
 #define NVMET_ASYNC_EVENTS	4
 #define NVMET_ERROR_LOG_SLOTS	128
 #define NVMET_NO_ERROR_LOC	((u16)-1)
@@ -33,6 +34,12 @@
 #define NVMET_FR_MAX_SIZE	8
 #define NVMET_PR_LOG_QUEUE_SIZE	64

+#define nvmet_for_each_ns(xa, index, entry) \
+	xa_for_each(xa, index, entry)
+
+#define nvmet_for_each_enabled_ns(xa, index, entry) \
+	xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
+
 /*
  * Supported optional AENs:
  */
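xa_for_each_marked() walks only entries whose mark is set; marks live in the XArray's node metadata, so toggling one is cheap and the entry itself never moves. A standalone sketch of the pattern these macros wrap (hypothetical widget objects, not nvmet code):

	#include <linux/xarray.h>
	#include <linux/slab.h>

	struct widget { int payload; };

	static DEFINE_XARRAY(widgets);
	#define WIDGET_READY	XA_MARK_1	/* same idea as NVMET_NS_ENABLED */

	static int widget_add(unsigned long id, struct widget *w)
	{
		/* Present in the array, but invisible to marked walkers. */
		return xa_insert(&widgets, id, w, GFP_KERNEL);
	}

	static void widget_set_ready(unsigned long id, bool ready)
	{
		if (ready)
			xa_set_mark(&widgets, id, WIDGET_READY);
		else
			xa_clear_mark(&widgets, id, WIDGET_READY);
	}

	static int widget_count_ready(void)
	{
		struct widget *w;
		unsigned long id;
		int n = 0;

		xa_for_each_marked(&widgets, id, w, WIDGET_READY)
			n++;	/* unmarked (not-ready) widgets are skipped */
		return n;
	}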
diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c
@@ -60,7 +60,7 @@ u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
		goto success;
	}

-	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->pr.enable)
			WRITE_ONCE(ns->pr.notify_mask, mask);
	}
@@ -1056,7 +1056,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
	 * nvmet_pr_init_ns(), see more details in nvmet_ns_enable().
	 * So just check ns->pr.enable.
	 */
-	xa_for_each(&subsys->namespaces, idx, ns) {
+	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
		if (ns->pr.enable) {
			ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
					&ctrl->hostid);
@@ -1067,7 +1067,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
	return 0;

 free_per_ctrl_refs:
-	xa_for_each(&subsys->namespaces, idx, ns) {
+	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
		if (ns->pr.enable) {
			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
			if (pc_ref)
@@ -1087,7 +1087,7 @@ void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
	kfifo_free(&ctrl->pr_log_mgr.log_queue);
	mutex_destroy(&ctrl->pr_log_mgr.lock);

-	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->pr.enable) {
			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
			if (pc_ref)