nvmet: Improve nvmet_alloc_ctrl() interface and implementation

Introduce struct nvmet_alloc_ctrl_args to define the arguments of
nvmet_alloc_ctrl() and avoid passing a pointer to a struct nvmet_req to
that function. The new data structure aggregates the arguments previously
passed to nvmet_alloc_ctrl() (subsysnqn, hostnqn, kato and hostid) with
the struct nvmet_req fields that the function used: port, p2p_client and
ops as input, result and error_loc as output, plus a status field.
nvmet_alloc_ctrl() is also changed to return a pointer to the allocated
and initialized controller structure instead of a status code, as the
status is now returned through the status field of
struct nvmet_alloc_ctrl_args.
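
In short, a caller fills the input fields of a struct nvmet_alloc_ctrl_args
and gets the controller pointer back, with the NVMe status reported through
the structure. A condensed view of the new interface, taken from the header
hunk at the end of this diff:

struct nvmet_alloc_ctrl_args {
	/* input */
	struct nvmet_port		*port;
	char				*subsysnqn;
	char				*hostnqn;
	uuid_t				*hostid;
	const struct nvmet_fabrics_ops	*ops;
	struct device			*p2p_client;
	u32				kato;
	/* output */
	u32				result;
	u16				error_loc;
	u16				status;
};

/* Returns the new controller, or NULL with args->status set on failure. */
struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);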

The function nvmet_setup_p2p_ns_map() is changed so that it no longer
takes a pointer to a struct nvmet_req; instead, the p2p_client device
pointer it needs is passed directly as an argument.

The code in nvmet_execute_admin_connect() that initializes a newly
allocated target controller is moved into nvmet_alloc_ctrl(). Only the
setup of the controller admin queue (the call to nvmet_install_queue())
remains in nvmet_execute_admin_connect().
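
After this move, the connect handler reduces to roughly the following shape
(a simplified sketch of the new nvmet_execute_admin_connect() shown in the
hunk further below; the connect-data copy, validation and NQN/hostid
assignments are elided):

static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmet_alloc_ctrl_args args = {
		.port = req->port,
		.ops = req->ops,
		.p2p_client = req->p2p_client,
	};
	struct nvmet_ctrl *ctrl;

	/* ... copy and validate the connect data, fill the NQN fields ... */

	/*
	 * Controller allocation, initialization and authentication setup
	 * now all happen inside nvmet_alloc_ctrl().
	 */
	ctrl = nvmet_alloc_ctrl(&args);
	if (!ctrl)
		goto out;

	/* Only the admin queue setup remains here. */
	args.status = nvmet_install_queue(ctrl, req);
	if (args.status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	args.result = cpu_to_le32(nvmet_connect_result(ctrl));
out:
	req->error_loc = args.error_loc;
	req->cqe->result.u32 = args.result;
	nvmet_req_complete(req, args.status);
}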

Finally, nvmet_alloc_ctrl() is exported so that target drivers can use it
directly to allocate and initialize a new controller structure, without
relying on a fabrics connect command request.
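
For illustration, a target driver that never sees a fabrics connect command
could now create a controller along the following lines. This is a
hypothetical sketch: apart from struct nvmet_alloc_ctrl_args,
nvmet_alloc_ctrl() and nvmet_ctrl_put() (the latter also exported by this
patch), every identifier is a placeholder.

/*
 * Hypothetical caller (not part of this patch): a target driver that
 * creates a controller directly instead of going through a fabrics
 * connect command. All "my_target" identifiers are placeholders.
 */
struct my_target {
	struct nvmet_port		*port;
	const struct nvmet_fabrics_ops	*ops;
	char				*subsysnqn;
	char				*hostnqn;
	uuid_t				hostid;
	u32				kato;
};

static struct nvmet_ctrl *my_target_create_ctrl(struct my_target *tgt)
{
	struct nvmet_alloc_ctrl_args args = {
		.port		= tgt->port,
		.ops		= tgt->ops,
		.subsysnqn	= tgt->subsysnqn,
		.hostnqn	= tgt->hostnqn,
		.hostid		= &tgt->hostid,
		.kato		= tgt->kato,
	};
	struct nvmet_ctrl *ctrl;

	ctrl = nvmet_alloc_ctrl(&args);
	if (!ctrl) {
		pr_err("failed to create controller, NVMe status %#x\n",
			args.status);
		return NULL;
	}

	return ctrl;
}

/* Releases the initial reference taken when the controller was created. */
static void my_target_destroy_ctrl(struct nvmet_ctrl *ctrl)
{
	nvmet_ctrl_put(ctrl);
}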

Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Rick Wertenbroek <rick.wertenbroek@gmail.com>
Tested-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Authored by Damien Le Moal on 2025-01-04 13:59:40 +09:00; committed by Keith Busch.
parent 200adac758
commit 6202783184
3 changed files with 95 additions and 66 deletions

@@ -1350,15 +1350,15 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
  * Note: ctrl->subsys->lock should be held when calling this function
  */
 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
-		struct nvmet_req *req)
+		struct device *p2p_client)
 {
 	struct nvmet_ns *ns;
 	unsigned long idx;
 
-	if (!req->p2p_client)
+	if (!p2p_client)
 		return;
 
-	ctrl->p2p_client = get_device(req->p2p_client);
+	ctrl->p2p_client = get_device(p2p_client);
 
 	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -1387,45 +1387,44 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
 	ctrl->ops->delete_ctrl(ctrl);
 }
 
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
-		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
-		uuid_t *hostid)
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
 {
 	struct nvmet_subsys *subsys;
 	struct nvmet_ctrl *ctrl;
+	u32 kato = args->kato;
+	u8 dhchap_status;
 	int ret;
-	u16 status;
 
-	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
-	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+	args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+	subsys = nvmet_find_get_subsys(args->port, args->subsysnqn);
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
-			subsysnqn);
-		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
-		req->error_loc = offsetof(struct nvme_common_command, dptr);
-		goto out;
+			args->subsysnqn);
+		args->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		args->error_loc = offsetof(struct nvme_common_command, dptr);
+		return NULL;
 	}
 
 	down_read(&nvmet_config_sem);
-	if (!nvmet_host_allowed(subsys, hostnqn)) {
+	if (!nvmet_host_allowed(subsys, args->hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
-			hostnqn, subsysnqn);
-		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+			args->hostnqn, args->subsysnqn);
+		args->result = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
-		status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
-		req->error_loc = offsetof(struct nvme_common_command, dptr);
+		args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+		args->error_loc = offsetof(struct nvme_common_command, dptr);
 		goto out_put_subsystem;
 	}
 	up_read(&nvmet_config_sem);
 
-	status = NVME_SC_INTERNAL;
+	args->status = NVME_SC_INTERNAL;
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl)
 		goto out_put_subsystem;
 	mutex_init(&ctrl->lock);
 
-	ctrl->port = req->port;
-	ctrl->ops = req->ops;
+	ctrl->port = args->port;
+	ctrl->ops = args->ops;
 
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
 	/* By default, set loop targets to clear IDS by default */
@@ -1439,8 +1438,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
 
-	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
-	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+	memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
+	memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
 
 	kref_init(&ctrl->ref);
 	ctrl->subsys = subsys;
@@ -1463,12 +1462,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 			subsys->cntlid_min, subsys->cntlid_max,
 			GFP_KERNEL);
 	if (ret < 0) {
-		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
+		args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
 		goto out_free_sqs;
 	}
 	ctrl->cntlid = ret;
 
-	uuid_copy(&ctrl->hostid, hostid);
+	uuid_copy(&ctrl->hostid, args->hostid);
 
 	/*
 	 * Discovery controllers may use some arbitrary high value
@@ -1490,12 +1489,35 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (ret)
 		goto init_pr_fail;
 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
-	nvmet_setup_p2p_ns_map(ctrl, req);
+	nvmet_setup_p2p_ns_map(ctrl, args->p2p_client);
 	nvmet_debugfs_ctrl_setup(ctrl);
 	mutex_unlock(&subsys->lock);
 
-	*ctrlp = ctrl;
-	return 0;
+	if (args->hostid)
+		uuid_copy(&ctrl->hostid, args->hostid);
+
+	dhchap_status = nvmet_setup_auth(ctrl);
+	if (dhchap_status) {
+		pr_err("Failed to setup authentication, dhchap status %u\n",
+			dhchap_status);
+		nvmet_ctrl_put(ctrl);
+		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
+			args->status =
+				NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+		else
+			args->status = NVME_SC_INTERNAL;
+		return NULL;
+	}
+
+	args->status = NVME_SC_SUCCESS;
+
+	pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+		nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
+		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+		ctrl->pi_support ? " T10-PI is enabled" : "",
+		nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+
+	return ctrl;
 
 init_pr_fail:
 	mutex_unlock(&subsys->lock);
@@ -1509,9 +1531,9 @@ out_free_ctrl:
 	kfree(ctrl);
 out_put_subsystem:
 	nvmet_subsys_put(subsys);
-out:
-	return status;
+	return NULL;
 }
+EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl);
 
 static void nvmet_ctrl_free(struct kref *ref)
 {
@@ -1547,6 +1569,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
 {
 	kref_put(&ctrl->ref, nvmet_ctrl_free);
 }
+EXPORT_SYMBOL_GPL(nvmet_ctrl_put);
 
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 {

@@ -213,73 +213,67 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	struct nvmf_connect_command *c = &req->cmd->connect;
 	struct nvmf_connect_data *d;
 	struct nvmet_ctrl *ctrl = NULL;
-	u16 status;
-	u8 dhchap_status;
+	struct nvmet_alloc_ctrl_args args = {
+		.port = req->port,
+		.ops = req->ops,
+		.p2p_client = req->p2p_client,
+		.kato = le32_to_cpu(c->kato),
+	};
 
 	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
 		return;
 
 	d = kmalloc(sizeof(*d), GFP_KERNEL);
 	if (!d) {
-		status = NVME_SC_INTERNAL;
+		args.status = NVME_SC_INTERNAL;
 		goto complete;
 	}
 
-	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
-	if (status)
+	args.status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+	if (args.status)
 		goto out;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
 			le16_to_cpu(c->recfmt));
-		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
-		status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
+		args.error_loc = offsetof(struct nvmf_connect_command, recfmt);
+		args.status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
 		goto out;
 	}
 
 	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
-		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
-		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+		args.status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+		args.result = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
 
 	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
 	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
-	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
-			le32_to_cpu(c->kato), &ctrl, &d->hostid);
-	if (status)
+
+	args.subsysnqn = d->subsysnqn;
+	args.hostnqn = d->hostnqn;
+	args.hostid = &d->hostid;
+	args.kato = c->kato;
+
+	ctrl = nvmet_alloc_ctrl(&args);
+	if (!ctrl)
 		goto out;
 
-	dhchap_status = nvmet_setup_auth(ctrl);
-	if (dhchap_status) {
-		pr_err("Failed to setup authentication, dhchap status %u\n",
-			dhchap_status);
-		nvmet_ctrl_put(ctrl);
-		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
-			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
-		else
-			status = NVME_SC_INTERNAL;
-		goto out;
-	}
-
-	status = nvmet_install_queue(ctrl, req);
-	if (status) {
+	args.status = nvmet_install_queue(ctrl, req);
+	if (args.status) {
 		nvmet_ctrl_put(ctrl);
 		goto out;
 	}
 
-	pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
-		nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
-		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
-		ctrl->pi_support ? " T10-PI is enabled" : "",
-		nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
-	req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+	args.result = cpu_to_le32(nvmet_connect_result(ctrl));
 out:
 	kfree(d);
 complete:
-	nvmet_req_complete(req, status);
+	req->error_loc = args.error_loc;
+	req->cqe->result.u32 = args.result;
+	nvmet_req_complete(req, args.status);
 }
 
 static void nvmet_execute_io_connect(struct nvmet_req *req)

@@ -549,9 +549,21 @@ int nvmet_sq_init(struct nvmet_sq *sq);
 
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
-		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
-		uuid_t *hostid);
+
+struct nvmet_alloc_ctrl_args {
+	struct nvmet_port		*port;
+	char				*subsysnqn;
+	char				*hostnqn;
+	uuid_t				*hostid;
+	const struct nvmet_fabrics_ops	*ops;
+	struct device			*p2p_client;
+	u32				kato;
+	u32				result;
+	u16				error_loc;
+	u16				status;
+};
+
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
 		const char *hostnqn, u16 cntlid,
 		struct nvmet_req *req);