// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT	256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {	/* for an LS RQST RCV */
	struct nvmefc_ls_rsp *lsrsp;
	struct nvmefc_tgt_fcp_req *fcpreq;	/* only if RS */

	struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_tgt_assoc *assoc;
	void *hosthandle;

	union nvmefc_ls_requests *rqstbuf;
	union nvmefc_ls_responses *rspbuf;
	u16 rqstdatalen;
	dma_addr_t rspdma;

	struct scatterlist sg[2];

	struct work_struct work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {	/* for an LS RQST XMT */
	struct nvmefc_ls_req ls_req;

	struct nvmet_fc_tgtport *tgtport;
	void *hosthandle;

	int ls_error;
	struct list_head lsreq_list; /* tgtport->ls_req_list */
	bool req_queued;

	struct work_struct put_work;
};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH	(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req *fcpreq;

	struct nvme_fc_cmd_iu cmdiubuf;
	struct nvme_fc_ersp_iu rspiubuf;
	dma_addr_t rspdma;
	struct scatterlist *next_sg;
	struct scatterlist *data_sg;
	int data_sg_cnt;
	u32 offset;
	enum nvmet_fcp_datadir io_dir;
	bool active;
	bool abort;
	bool aborted;
	bool writedataactive;
	spinlock_t flock;

	struct nvmet_req req;
	struct work_struct defer_work;

	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_tgt_queue *queue;

	struct list_head fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port fc_target_port;

	struct list_head tgt_list; /* nvmet_fc_target_list */
	struct device *dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template *ops;

	struct nvmet_fc_ls_iod *iod;
	spinlock_t lock;
	struct list_head ls_rcv_list;
	struct list_head ls_req_list;
	struct list_head ls_busylist;
	struct list_head assoc_list;
	struct list_head host_list;
	struct ida assoc_cnt;
	struct nvmet_fc_port_entry *pe;
	struct kref ref;
	u32 max_sg_cnt;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_port *port;
	u64 node_name;
	u64 port_name;
	struct list_head pe_list;
};

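/*
 * When all sqsize fcp_iod contexts of a queue are in use, a newly
 * received command is parked on queue->pending_cmd_list in one of
 * these wrappers and replayed through defer_rcv()/defer_work once a
 * context frees up; idle wrappers are cached on
 * queue->avail_defer_list (see nvmet_fc_free_fcp_iod() and
 * nvmet_fc_delete_target_queue() below).
 */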
struct nvmet_fc_defer_fcp_req {
	struct list_head req_list;
	struct nvmefc_tgt_fcp_req *fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool ninetypercent;
	u16 qid;
	u16 sqsize;
	u16 ersp_ratio;
	__le16 sqhd;
	atomic_t connected;
	atomic_t sqtail;
	atomic_t zrspcnt;
	atomic_t rsn;
	spinlock_t qlock;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;
	struct nvmet_fc_tgt_assoc *assoc;
	struct list_head fod_list;
	struct list_head pending_cmd_list;
	struct list_head avail_defer_list;
	struct workqueue_struct *work_q;
	struct kref ref;
	/* array of fcp_iods */
	struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport *tgtport;
	void *hosthandle;
	struct list_head host_list;
	struct kref ref;
	u8 invalid;
};

struct nvmet_fc_tgt_assoc {
	u64 association_id;
	u32 a_id;
	atomic_t terminating;
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_hostport *hostport;
	struct nvmet_fc_ls_iod *rcv_disconn;
	struct list_head a_list;
	struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
	struct kref ref;
	struct work_struct del_work;
};

/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID		sizeof(u16)
#define BYTES_FOR_QID_SHIFT	(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK	((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
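
/*
 * Illustrative example: with association_id 0x1234567890ab0000,
 * nvmet_fc_makeconnid(assoc, 3) yields connection id
 * 0x1234567890ab0003; nvmet_fc_getassociationid() recovers
 * 0x1234567890ab0000 and nvmet_fc_getqueueid() recovers 3. For the
 * admin queue (qid 0) the connection id equals the association id.
 */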

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);

static void nvmet_fc_put_lsop_work(struct work_struct *work)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(work, struct nvmet_fc_ls_req_op, put_work);

	nvmet_fc_tgtport_put(lsop->tgtport);
	kfree(lsop);
}

static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}


/* ********************** FC-NVME LS XMT Handling ************************* */


static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		goto out_putwork;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
			(lsreq->rqstlen + lsreq->rsplen),
			DMA_BIDIRECTIONAL);

out_putwork:
	queue_work(nvmet_wq, &lsop->put_work);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				lsreq->rqstlen + lsreq->rsplen,
				DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
			(lsreq->rqstlen + lsreq->rsplen),
			DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

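/*
 * Note: the only LS request this target code currently transmits is
 * Disconnect Association (nvmet_fc_xmt_disconnect_assoc() below); it
 * is sent fire-and-forget via the async helper above, with
 * nvmet_fc_disconnect_assoc_done() merely releasing the request.
 */
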
static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

	__nvmet_fc_finish_ls_req(lsop);

	/* fc-nvme target doesn't care about success or failure of cmd */
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. I.e. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme target is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme host, so the target may never get a
 * response even if it tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme host
 * is present and receives the LS, it too can tear down.
 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL or no hosthandle, it's an older lldd and no
	 * message is normal. Otherwise, send unless the hostport has
	 * already been invalidated by the lldd.
	 */
	if (!tgtport->ops->ls_req || assoc->hostport->invalid)
		return;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
	}
}


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
				       sizeof(union nvmefc_ls_responses),
				       GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}

	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
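	/*
	 * Illustrative: with ops->max_hw_queues == 4, io queues qid
	 * 1..8 map to hwqid 0,1,2,3,0,1,2,3; the admin queue (qid 0)
	 * always uses hwqid 0.
	 */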

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
	if (!queue)
		return NULL;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_free_queue;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	nvmet_cq_init(&queue->nvme_cq);
	ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	assoc->queues[qid] = queue;

	return queue;

out_fail_iodlist:
	nvmet_cq_put(&queue->nvme_cq);
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	/* if not connected, nothing to do */
	if (!disconnect)
		return;

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (fod->writedataactive) {
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			} else
				spin_unlock(&fod->flock);
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	nvmet_sq_destroy(&queue->nvme_sq);
	nvmet_cq_put(&queue->nvme_cq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
			u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			rcu_read_unlock();
			return queue;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void
nvmet_fc_hostport_free(struct kref *ref)
{
	struct nvmet_fc_hostport *hostport =
		container_of(ref, struct nvmet_fc_hostport, ref);
	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	if (tgtport->ops->host_release && hostport->invalid)
		tgtport->ops->host_release(hostport->hosthandle);
	kfree(hostport);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
}

static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{
	return kref_get_unless_zero(&hostport->ref);
}

static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *host;

	lockdep_assert_held(&tgtport->lock);

	list_for_each_entry(host, &tgtport->host_list, host_list) {
		if (host->hosthandle == hosthandle && !host->invalid) {
			if (nvmet_fc_hostport_get(host))
				return host;
		}
	}

	return NULL;
}

static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *match = NULL;
	unsigned long flags;

	/*
	 * Caller holds a reference on tgtport.
	 */

	/* if LLDD not implemented, leave as NULL */
	if (!hosthandle)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (match)
		return match;

	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
	if (!newhost)
		return ERR_PTR(-ENOMEM);

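	/*
	 * kzalloc() may have slept, so another thread may have added a
	 * hostport for this handle in the meantime: recheck under the
	 * lock and drop our allocation if it lost the race.
	 */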
	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	if (match) {
		/* new allocation not needed */
		kfree(newhost);
		newhost = match;
	} else {
		nvmet_fc_tgtport_get(tgtport);
		newhost->tgtport = tgtport;
		newhost->hosthandle = hosthandle;
		INIT_LIST_HEAD(&newhost->host_list);
		kref_init(&newhost->ref);

		list_add_tail(&newhost->host_list, &tgtport->host_list);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return newhost;
}

static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	int terminating;

	terminating = atomic_xchg(&assoc->terminating, 1);

	/* if already terminating, do nothing */
	if (terminating)
		return;

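	/*
	 * Hold the tgtport across the scheduled delete;
	 * nvmet_fc_delete_assoc_work() drops the reference, or it is
	 * dropped right here if the work could not be queued.
	 */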
	nvmet_fc_tgtport_get(assoc->tgtport);
	if (!queue_work(nvmet_wq, &assoc->del_work))
		nvmet_fc_tgtport_put(assoc->tgtport);
}

static bool
nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{
	struct nvmet_fc_tgt_assoc *a;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
		if (association_id == a->association_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_tgt_assoc *assoc;
	unsigned long flags;
	bool done;
	u64 ran;
	int idx;

	if (!tgtport->pe)
		return NULL;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
	if (IS_ERR(assoc->hostport))
		goto out_ida;

	assoc->tgtport = tgtport;
	nvmet_fc_tgtport_get(tgtport);
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
	atomic_set(&assoc->terminating, 0);

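	/*
	 * Pick a unique association id: random in the upper six bytes,
	 * zero in the two QID bytes (see nvmet_fc_makeconnid()),
	 * retrying in the unlikely event of a collision.
	 */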
	done = false;
	do {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		if (!nvmet_fc_assoc_exists(tgtport, ran)) {
			assoc->association_id = ran;
			list_add_tail_rcu(&assoc->a_list,
					&tgtport->assoc_list);
			done = true;
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	} while (!done);

	return assoc;

out_ida:
	ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;
	int i;

	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			nvmet_fc_delete_target_queue(assoc->queues[i]);
	}

	/* Send Disconnect now that all i/o has completed */
	nvmet_fc_xmt_disconnect_assoc(assoc);

	nvmet_fc_hostport_put(assoc->hostport);
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	spin_unlock_irqrestore(&tgtport->lock, flags);
	/* if pending Rcv Disconnect Association LS, send rsp now */
	if (oldls)
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	ida_free(&tgtport->assoc_cnt, assoc->a_id);
	pr_info("{%d:%d}: Association freed\n",
		tgtport->fc_target_port.port_num, assoc->a_id);
	kfree(assoc);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del_rcu(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	synchronize_rcu();

	/* ensure all in-flight I/Os have been processed */
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			flush_workqueue(assoc->queues[i]->work_q);
	}

	pr_info("{%d:%d}: Association deleted\n",
		tgtport->fc_target_port.port_num, assoc->a_id);

	nvmet_fc_tgtport_put(tgtport);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			if (!nvmet_fc_tgt_a_get(assoc))
				ret = NULL;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	nvmet_fc_tgtport_get(tgtport);
	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport) {
		pe->tgtport->pe = NULL;
		nvmet_fc_tgtport_put(pe->tgtport);
	}
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe) {
		nvmet_fc_tgtport_put(pe->tgtport);
		pe->tgtport = NULL;
	}
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's wwn's. (the targetport existed,
 * nvmet configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			if (!nvmet_fc_tgtport_get(tgtport))
				continue;

			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	if (template->target_priv_sz)
		newrec->fc_target_port.private = &newrec[1];
	else
		newrec->fc_target_port.private = NULL;
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_free(&nvmet_fc_tgtport_cnt,
		 tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	rcu_read_unlock();
}

/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                       to remove references to a hosthandle for LS's.
 *
 * The nvmet-fc layer ensures that any references to the hosthandle
 * on the targetport are forgotten (set to NULL). The LLDD will
 * typically call this when a login with a remote host port has been
 * lost, thus LS's for the remote host port are no longer possible.
 *
 * If an LS request is outstanding to the targetport/hosthandle (or
 * issued concurrently with the call to invalidate the host), the
 * LLDD is responsible for terminating/aborting the LS and completing
 * the LS request. It is recommended that these terminations/aborts
 * occur after calling to invalidate the host handle to avoid additional
 * retries by the nvmet-fc transport. The nvmet-fc transport may
 * continue to reference the host handle while it cleans up outstanding
 * NVME associations. The nvmet-fc transport will call the
 * ops->host_release() callback to notify the LLDD that all references
 * are complete and the related host handle can be recovered.
 * Note: if there are no references, the callback may be called before
 * the invalidate host call returns.
 *
 * @target_port: pointer to the (registered) target port that a prior
 *              LS was received on and which supplied the transport the
 *              hosthandle.
 * @hosthandle: the handle (pointer) that represents the host port
 *             that no longer has connectivity and that LS's should
 *             no longer be directed to.
 */
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
			void *hosthandle)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;
	bool noassoc = true;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				 &tgtport->assoc_list, a_list) {
		if (assoc->hostport->hosthandle != hosthandle)
			continue;
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		assoc->hostport->invalid = 1;
		noassoc = false;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* if there's nothing to wait for - call the callback */
	if (noassoc && tgtport->ops->host_release)
		tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
| 1546 | |
| 1547 | /* |
| 1548 | * nvmet layer has called to terminate an association |
| 1549 | */ |
| 1550 | static void |
| 1551 | nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) |
| 1552 | { |
| 1553 | struct nvmet_fc_tgtport *tgtport, *next; |
| 1554 | struct nvmet_fc_tgt_assoc *assoc; |
| 1555 | struct nvmet_fc_tgt_queue *queue; |
| 1556 | unsigned long flags; |
| 1557 | bool found_ctrl = false; |
| 1558 | |
| 1559 | /* this is a bit ugly, but don't want to make locks layered */ |
| 1560 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 1561 | list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, |
| 1562 | tgt_list) { |
| 1563 | if (!nvmet_fc_tgtport_get(tgtport)) |
| 1564 | continue; |
| 1565 | spin_unlock_irqrestore(lock: &nvmet_fc_tgtlock, flags); |
| 1566 | |
| 1567 | rcu_read_lock(); |
| 1568 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
| 1569 | queue = assoc->queues[0]; |
| 1570 | if (queue && queue->nvme_sq.ctrl == ctrl) { |
| 1571 | if (nvmet_fc_tgt_a_get(assoc)) |
| 1572 | found_ctrl = true; |
| 1573 | break; |
| 1574 | } |
| 1575 | } |
| 1576 | rcu_read_unlock(); |
| 1577 | |
| 1578 | nvmet_fc_tgtport_put(tgtport); |
| 1579 | |
| 1580 | if (found_ctrl) { |
| 1581 | nvmet_fc_schedule_delete_assoc(assoc); |
| 1582 | nvmet_fc_tgt_a_put(assoc); |
| 1583 | return; |
| 1584 | } |
| 1585 | |
| 1586 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 1587 | } |
| 1588 | spin_unlock_irqrestore(lock: &nvmet_fc_tgtlock, flags); |
| 1589 | } |
| 1590 | |
| 1591 | static void |
| 1592 | nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport) |
| 1593 | { |
| 1594 | struct nvmet_fc_ls_req_op *lsop; |
| 1595 | struct nvmefc_ls_req *lsreq; |
| 1596 | struct nvmet_fc_ls_iod *iod; |
| 1597 | int i; |
| 1598 | |
| 1599 | iod = tgtport->iod; |
| 1600 | for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) |
| 1601 | cancel_work(&iod->work);
| 1602 | |
| 1603 | /* |
| 1604 | * After this point the connection is lost and thus any pending |
| 1605 | * request can't be processed by the normal completion path. This |
| 1606 | * is likely a request from nvmet_fc_send_ls_req_async. |
| 1607 | */ |
| 1608 | while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, |
| 1609 | struct nvmet_fc_ls_req_op, lsreq_list))) { |
| 1610 | list_del(&lsop->lsreq_list);
| 1611 | |
| 1612 | if (!lsop->req_queued) |
| 1613 | continue; |
| 1614 | |
| 1615 | lsreq = &lsop->ls_req; |
| 1616 | fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
| 1617 | (lsreq->rqstlen + lsreq->rsplen),
| 1618 | DMA_BIDIRECTIONAL);
| 1619 | nvmet_fc_tgtport_put(tgtport); |
| 1620 | kfree(lsop);
| 1621 | } |
| 1622 | } |
| 1623 | |
| 1624 | /** |
| 1625 | * nvmet_fc_unregister_targetport - transport entry point called by an |
| 1626 | * LLDD to deregister/remove a previously |
| 1627 | * registered local NVME subsystem FC port.
| 1628 | * @target_port: pointer to the (registered) target port that is to be |
| 1629 | * deregistered. |
| 1630 | * |
| 1631 | * Returns: |
| 1632 | * a completion status. Must be 0 upon success; a negative errno |
| 1633 | * (ex: -ENXIO) upon failure. |
| 1634 | */ |
| 1635 | int |
| 1636 | nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) |
| 1637 | { |
| 1638 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
| 1639 | unsigned long flags; |
| 1640 | |
| 1641 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 1642 | list_del(&tgtport->tgt_list);
| 1643 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
| 1644 | |
| 1645 | nvmet_fc_portentry_unbind_tgt(tgtport); |
| 1646 | |
| 1647 | /* terminate any outstanding associations */ |
| 1648 | __nvmet_fc_free_assocs(tgtport); |
| 1649 | |
| 1650 | flush_workqueue(nvmet_wq); |
| 1651 | |
| 1652 | nvmet_fc_free_pending_reqs(tgtport); |
| 1653 | nvmet_fc_tgtport_put(tgtport); |
| 1654 | |
| 1655 | return 0; |
| 1656 | } |
| 1657 | EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); |
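|  | 
|  | /*
|  |  * Illustrative sketch only: the usual LLDD teardown order when removing
|  |  * a local port. "example_lport" is a hypothetical LLDD structure.
|  |  */
|  | #if 0
|  | static void example_lldd_remove_local_port(struct example_lport *lport)
|  | {
|  | 	/*
|  | 	 * Returns only after outstanding associations and pending LS
|  | 	 * requests have been drained, so the LLDD may free its port
|  | 	 * resources once this returns 0.
|  | 	 */
|  | 	if (nvmet_fc_unregister_targetport(lport->targetport))
|  | 		pr_warn("example: targetport unregister failed\n");
|  | }
|  | #endif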
| 1658 | |
| 1659 | |
| 1660 | /* ********************** FC-NVME LS RCV Handling ************************* */ |
| 1661 | |
| 1662 | |
| 1663 | static void |
| 1664 | nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, |
| 1665 | struct nvmet_fc_ls_iod *iod) |
| 1666 | { |
| 1667 | struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; |
| 1668 | struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; |
| 1669 | struct nvmet_fc_tgt_queue *queue; |
| 1670 | int ret = 0; |
| 1671 | |
| 1672 | memset(acc, 0, sizeof(*acc)); |
| 1673 | |
| 1674 | /* |
| 1675 | * The FC-NVME spec changed: because the padding of the Create
| 1676 | * Association Cmd descriptor was originally incorrect, initiators
| 1677 | * send descriptors of differing lengths.
| 1678 | * Accept anything of at least the "minimum" length. Assume the
| 1679 | * format per the 1.15 spec (with HOSTID reduced to 16 bytes) and
| 1680 | * ignore the length of the trailing pad.
| 1681 | */ |
| 1682 | if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) |
| 1683 | ret = VERR_CR_ASSOC_LEN; |
| 1684 | else if (be32_to_cpu(rqst->desc_list_len) < |
| 1685 | FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) |
| 1686 | ret = VERR_CR_ASSOC_RQST_LEN; |
| 1687 | else if (rqst->assoc_cmd.desc_tag != |
| 1688 | cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) |
| 1689 | ret = VERR_CR_ASSOC_CMD; |
| 1690 | else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < |
| 1691 | FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) |
| 1692 | ret = VERR_CR_ASSOC_CMD_LEN; |
| 1693 | else if (!rqst->assoc_cmd.ersp_ratio || |
| 1694 | (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= |
| 1695 | be16_to_cpu(rqst->assoc_cmd.sqsize))) |
| 1696 | ret = VERR_ERSP_RATIO; |
| 1697 | |
| 1698 | else { |
| 1699 | /* new association w/ admin queue */ |
| 1700 | iod->assoc = nvmet_fc_alloc_target_assoc(
| 1701 | tgtport, iod->hosthandle);
| 1702 | if (!iod->assoc) |
| 1703 | ret = VERR_ASSOC_ALLOC_FAIL; |
| 1704 | else { |
| 1705 | queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
| 1706 | be16_to_cpu(rqst->assoc_cmd.sqsize)); |
| 1707 | if (!queue) { |
| 1708 | ret = VERR_QUEUE_ALLOC_FAIL; |
| 1709 | nvmet_fc_tgt_a_put(iod->assoc);
| 1710 | } |
| 1711 | } |
| 1712 | } |
| 1713 | |
| 1714 | if (ret) { |
| 1715 | pr_err("{%d}: Create Association LS failed: %s\n" , |
| 1716 | tgtport->fc_target_port.port_num, |
| 1717 | validation_errors[ret]); |
| 1718 | iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
| 1719 | sizeof(*acc), rqst->w0.ls_cmd,
| 1720 | FCNVME_RJT_RC_LOGIC,
| 1721 | FCNVME_RJT_EXP_NONE, 0);
| 1722 | return; |
| 1723 | } |
| 1724 | |
| 1725 | queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); |
| 1726 | atomic_set(&queue->connected, 1);
| 1727 | queue->sqhd = 0; /* best place to init value */ |
| 1728 | |
| 1729 | pr_info("{%d:%d}: Association created\n" , |
| 1730 | tgtport->fc_target_port.port_num, iod->assoc->a_id); |
| 1731 | |
| 1732 | /* format a response */ |
| 1733 | |
| 1734 | iod->lsrsp->rsplen = sizeof(*acc); |
| 1735 | |
| 1736 | nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
| 1737 | fcnvme_lsdesc_len(
| 1738 | sizeof(struct fcnvme_ls_cr_assoc_acc)),
| 1739 | FCNVME_LS_CREATE_ASSOCIATION);
| 1740 | acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); |
| 1741 | acc->associd.desc_len = |
| 1742 | fcnvme_lsdesc_len( |
| 1743 | sizeof(struct fcnvme_lsdesc_assoc_id));
| 1744 | acc->associd.association_id = |
| 1745 | cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); |
| 1746 | acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); |
| 1747 | acc->connectid.desc_len = |
| 1748 | fcnvme_lsdesc_len( |
| 1749 | sizeof(struct fcnvme_lsdesc_conn_id));
| 1750 | acc->connectid.connection_id = acc->associd.association_id; |
| 1751 | } |
| 1752 | |
| 1753 | static void |
| 1754 | nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, |
| 1755 | struct nvmet_fc_ls_iod *iod) |
| 1756 | { |
| 1757 | struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; |
| 1758 | struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; |
| 1759 | struct nvmet_fc_tgt_queue *queue; |
| 1760 | int ret = 0; |
| 1761 | |
| 1762 | memset(acc, 0, sizeof(*acc)); |
| 1763 | |
| 1764 | if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) |
| 1765 | ret = VERR_CR_CONN_LEN; |
| 1766 | else if (rqst->desc_list_len != |
| 1767 | fcnvme_lsdesc_len( |
| 1768 | sizeof(struct fcnvme_ls_cr_conn_rqst)))
| 1769 | ret = VERR_CR_CONN_RQST_LEN; |
| 1770 | else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) |
| 1771 | ret = VERR_ASSOC_ID; |
| 1772 | else if (rqst->associd.desc_len != |
| 1773 | fcnvme_lsdesc_len( |
| 1774 | sizeof(struct fcnvme_lsdesc_assoc_id)))
| 1775 | ret = VERR_ASSOC_ID_LEN; |
| 1776 | else if (rqst->connect_cmd.desc_tag != |
| 1777 | cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) |
| 1778 | ret = VERR_CR_CONN_CMD; |
| 1779 | else if (rqst->connect_cmd.desc_len != |
| 1780 | fcnvme_lsdesc_len( |
| 1781 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
| 1782 | ret = VERR_CR_CONN_CMD_LEN; |
| 1783 | else if (!rqst->connect_cmd.ersp_ratio || |
| 1784 | (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= |
| 1785 | be16_to_cpu(rqst->connect_cmd.sqsize))) |
| 1786 | ret = VERR_ERSP_RATIO; |
| 1787 | |
| 1788 | else { |
| 1789 | /* new io queue */ |
| 1790 | iod->assoc = nvmet_fc_find_target_assoc(tgtport, |
| 1791 | be64_to_cpu(rqst->associd.association_id)); |
| 1792 | if (!iod->assoc) |
| 1793 | ret = VERR_NO_ASSOC; |
| 1794 | else { |
| 1795 | queue = nvmet_fc_alloc_target_queue(iod->assoc,
| 1796 | be16_to_cpu(rqst->connect_cmd.qid), |
| 1797 | be16_to_cpu(rqst->connect_cmd.sqsize)); |
| 1798 | if (!queue) |
| 1799 | ret = VERR_QUEUE_ALLOC_FAIL; |
| 1800 | |
| 1801 | /* release get taken in nvmet_fc_find_target_assoc */ |
| 1802 | nvmet_fc_tgt_a_put(iod->assoc);
| 1803 | } |
| 1804 | } |
| 1805 | |
| 1806 | if (ret) { |
| 1807 | pr_err("{%d}: Create Connection LS failed: %s\n" , |
| 1808 | tgtport->fc_target_port.port_num, |
| 1809 | validation_errors[ret]); |
| 1810 | iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
| 1811 | sizeof(*acc), rqst->w0.ls_cmd,
| 1812 | (ret == VERR_NO_ASSOC) ?
| 1813 | FCNVME_RJT_RC_INV_ASSOC :
| 1814 | FCNVME_RJT_RC_LOGIC,
| 1815 | FCNVME_RJT_EXP_NONE, 0);
| 1816 | return; |
| 1817 | } |
| 1818 | |
| 1819 | queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); |
| 1820 | atomic_set(&queue->connected, 1);
| 1821 | queue->sqhd = 0; /* best place to init value */ |
| 1822 | |
| 1823 | /* format a response */ |
| 1824 | |
| 1825 | iod->lsrsp->rsplen = sizeof(*acc); |
| 1826 | |
| 1827 | nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
| 1828 | fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
| 1829 | FCNVME_LS_CREATE_CONNECTION);
| 1830 | acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); |
| 1831 | acc->connectid.desc_len = |
| 1832 | fcnvme_lsdesc_len( |
| 1833 | sizeof(struct fcnvme_lsdesc_conn_id));
| 1834 | acc->connectid.connection_id = |
| 1835 | cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, |
| 1836 | be16_to_cpu(rqst->connect_cmd.qid))); |
| 1837 | } |
| 1838 | |
| 1839 | /* |
| 1840 | * Returns true if the LS response is to be transmitted
| 1841 | * Returns false if the LS response is to be delayed |
| 1842 | */ |
| 1843 | static int |
| 1844 | nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, |
| 1845 | struct nvmet_fc_ls_iod *iod) |
| 1846 | { |
| 1847 | struct fcnvme_ls_disconnect_assoc_rqst *rqst = |
| 1848 | &iod->rqstbuf->rq_dis_assoc; |
| 1849 | struct fcnvme_ls_disconnect_assoc_acc *acc = |
| 1850 | &iod->rspbuf->rsp_dis_assoc; |
| 1851 | struct nvmet_fc_tgt_assoc *assoc = NULL; |
| 1852 | struct nvmet_fc_ls_iod *oldls = NULL; |
| 1853 | unsigned long flags; |
| 1854 | int ret = 0; |
| 1855 | |
| 1856 | memset(acc, 0, sizeof(*acc)); |
| 1857 | |
| 1858 | ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
| 1859 | if (!ret) { |
| 1860 | /* match an active association - takes an assoc ref if !NULL */ |
| 1861 | assoc = nvmet_fc_find_target_assoc(tgtport, |
| 1862 | be64_to_cpu(rqst->associd.association_id)); |
| 1863 | iod->assoc = assoc; |
| 1864 | if (!assoc) |
| 1865 | ret = VERR_NO_ASSOC; |
| 1866 | } |
| 1867 | |
| 1868 | if (ret || !assoc) { |
| 1869 | pr_err("{%d}: Disconnect LS failed: %s\n" , |
| 1870 | tgtport->fc_target_port.port_num, |
| 1871 | validation_errors[ret]); |
| 1872 | iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
| 1873 | sizeof(*acc), rqst->w0.ls_cmd,
| 1874 | (ret == VERR_NO_ASSOC) ?
| 1875 | FCNVME_RJT_RC_INV_ASSOC :
| 1876 | FCNVME_RJT_RC_LOGIC,
| 1877 | FCNVME_RJT_EXP_NONE, 0);
| 1878 | return true; |
| 1879 | } |
| 1880 | |
| 1881 | /* format a response */ |
| 1882 | |
| 1883 | iod->lsrsp->rsplen = sizeof(*acc); |
| 1884 | |
| 1885 | nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
| 1886 | fcnvme_lsdesc_len(
| 1887 | sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
| 1888 | rqst_ls_cmd: FCNVME_LS_DISCONNECT_ASSOC); |
| 1889 | |
| 1890 | /* |
| 1891 | * The rules for LS responses say the response cannot
| 1892 | * go back until ABTSs have been sent for all outstanding
| 1893 | * I/O and a Disconnect Association LS has been sent.
| 1894 | * So... save off the Disconnect LS to send the response
| 1895 | * later. If there was a prior LS already saved, replace
| 1896 | * it with the newer one and send a "can't perform" reject
| 1897 | * on the older one. |
| 1898 | */ |
| 1899 | spin_lock_irqsave(&tgtport->lock, flags); |
| 1900 | oldls = assoc->rcv_disconn; |
| 1901 | assoc->rcv_disconn = iod; |
| 1902 | spin_unlock_irqrestore(&tgtport->lock, flags);
| 1903 | |
| 1904 | if (oldls) { |
| 1905 | pr_info("{%d:%d}: Multiple Disconnect Association LS's " |
| 1906 | "received\n" , |
| 1907 | tgtport->fc_target_port.port_num, assoc->a_id); |
| 1908 | /* overwrite good response with bogus failure */ |
| 1909 | oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
| 1910 | sizeof(*iod->rspbuf),
| 1911 | /* ok to use rqst, LS is same */
| 1912 | rqst->w0.ls_cmd,
| 1913 | FCNVME_RJT_RC_UNAB,
| 1914 | FCNVME_RJT_EXP_NONE, 0);
| 1915 | nvmet_fc_xmt_ls_rsp(tgtport, oldls);
| 1916 | } |
| 1917 | |
| 1918 | nvmet_fc_schedule_delete_assoc(assoc); |
| 1919 | nvmet_fc_tgt_a_put(assoc); |
| 1920 | |
| 1921 | return false; |
| 1922 | } |
| 1923 | |
| 1924 | |
| 1925 | /* *********************** NVME Ctrl Routines **************************** */ |
| 1926 | |
| 1927 | |
| 1928 | static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); |
| 1929 | |
| 1930 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; |
| 1931 | |
| 1932 | static void |
| 1933 | nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) |
| 1934 | { |
| 1935 | struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; |
| 1936 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; |
| 1937 | |
| 1938 | fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
| 1939 | sizeof(*iod->rspbuf), DMA_TO_DEVICE);
| 1940 | nvmet_fc_free_ls_iod(tgtport, iod); |
| 1941 | nvmet_fc_tgtport_put(tgtport); |
| 1942 | } |
| 1943 | |
| 1944 | static void |
| 1945 | nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, |
| 1946 | struct nvmet_fc_ls_iod *iod) |
| 1947 | { |
| 1948 | int ret; |
| 1949 | |
| 1950 | fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
| 1951 | sizeof(*iod->rspbuf), DMA_TO_DEVICE);
| 1952 | |
| 1953 | ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); |
| 1954 | if (ret) |
| 1955 | nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
| 1956 | } |
| 1957 | |
| 1958 | /* |
| 1959 | * Actual processing routine for received FC-NVME LS Requests from the LLD |
| 1960 | */ |
| 1961 | static void |
| 1962 | nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, |
| 1963 | struct nvmet_fc_ls_iod *iod) |
| 1964 | { |
| 1965 | struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; |
| 1966 | bool sendrsp = true; |
| 1967 | |
| 1968 | iod->lsrsp->nvme_fc_private = iod; |
| 1969 | iod->lsrsp->rspbuf = iod->rspbuf; |
| 1970 | iod->lsrsp->rspdma = iod->rspdma; |
| 1971 | iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; |
| 1972 | /* Be preventative; handlers will later set a valid length */
| 1973 | iod->lsrsp->rsplen = 0; |
| 1974 | |
| 1975 | iod->assoc = NULL; |
| 1976 | |
| 1977 | /* |
| 1978 | * handlers: |
| 1979 | * parse request input, execute the request, and format the |
| 1980 | * LS response |
| 1981 | */ |
| 1982 | switch (w0->ls_cmd) { |
| 1983 | case FCNVME_LS_CREATE_ASSOCIATION: |
| 1984 | /* Creates Association and initial Admin Queue/Connection */ |
| 1985 | nvmet_fc_ls_create_association(tgtport, iod); |
| 1986 | break; |
| 1987 | case FCNVME_LS_CREATE_CONNECTION: |
| 1988 | /* Creates an IO Queue/Connection */ |
| 1989 | nvmet_fc_ls_create_connection(tgtport, iod); |
| 1990 | break; |
| 1991 | case FCNVME_LS_DISCONNECT_ASSOC: |
| 1992 | /* Terminate a Queue/Connection or the Association */ |
| 1993 | sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); |
| 1994 | break; |
| 1995 | default: |
| 1996 | iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
| 1997 | sizeof(*iod->rspbuf), w0->ls_cmd,
| 1998 | FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
| 1999 | } |
| 2000 | |
| 2001 | if (sendrsp) |
| 2002 | nvmet_fc_xmt_ls_rsp(tgtport, iod); |
| 2003 | } |
| 2004 | |
| 2005 | /* |
| 2006 | * Work-queue wrapper that runs nvmet_fc_handle_ls_rqst() for a received LS
| 2007 | */ |
| 2008 | static void |
| 2009 | nvmet_fc_handle_ls_rqst_work(struct work_struct *work) |
| 2010 | { |
| 2011 | struct nvmet_fc_ls_iod *iod = |
| 2012 | container_of(work, struct nvmet_fc_ls_iod, work); |
| 2013 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; |
| 2014 | |
| 2015 | nvmet_fc_handle_ls_rqst(tgtport, iod); |
| 2016 | } |
| 2017 | |
| 2018 | |
| 2019 | /** |
| 2020 | * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD |
| 2021 | * upon the reception of a NVME LS request. |
| 2022 | * |
| 2023 | * The nvmet-fc layer will copy payload to an internal structure for |
| 2024 | * processing. As such, upon completion of the routine, the LLDD may |
| 2025 | * immediately free/reuse the LS request buffer passed in the call. |
| 2026 | * |
| 2027 | * If this routine returns error, the LLDD should abort the exchange. |
| 2028 | * |
| 2029 | * @target_port: pointer to the (registered) target port the LS was |
| 2030 | * received on. |
| 2031 | * @hosthandle: pointer to the host-specific data; it is stored in the iod.
| 2032 | * @lsrsp: pointer to a lsrsp structure to be used to reference |
| 2033 | * the exchange corresponding to the LS. |
| 2034 | * @lsreqbuf: pointer to the buffer containing the LS Request |
| 2035 | * @lsreqbuf_len: length, in bytes, of the received LS request |
| 2036 | */ |
| 2037 | int |
| 2038 | nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, |
| 2039 | void *hosthandle, |
| 2040 | struct nvmefc_ls_rsp *lsrsp, |
| 2041 | void *lsreqbuf, u32 lsreqbuf_len) |
| 2042 | { |
| 2043 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
| 2044 | struct nvmet_fc_ls_iod *iod; |
| 2045 | struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; |
| 2046 | |
| 2047 | if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { |
| 2048 | pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n" , |
| 2049 | tgtport->fc_target_port.port_num, |
| 2050 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
| 2051 | nvmefc_ls_names[w0->ls_cmd] : "" , |
| 2052 | lsreqbuf_len); |
| 2053 | return -E2BIG; |
| 2054 | } |
| 2055 | |
| 2056 | if (!nvmet_fc_tgtport_get(tgtport)) { |
| 2057 | pr_info("{%d}: RCV %s LS failed: target deleting\n" , |
| 2058 | tgtport->fc_target_port.port_num, |
| 2059 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
| 2060 | nvmefc_ls_names[w0->ls_cmd] : "" ); |
| 2061 | return -ESHUTDOWN; |
| 2062 | } |
| 2063 | |
| 2064 | iod = nvmet_fc_alloc_ls_iod(tgtport); |
| 2065 | if (!iod) { |
| 2066 | pr_info("{%d}: RCV %s LS failed: context allocation failed\n" , |
| 2067 | tgtport->fc_target_port.port_num, |
| 2068 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
| 2069 | nvmefc_ls_names[w0->ls_cmd] : "" ); |
| 2070 | nvmet_fc_tgtport_put(tgtport); |
| 2071 | return -ENOENT; |
| 2072 | } |
| 2073 | |
| 2074 | iod->lsrsp = lsrsp; |
| 2075 | iod->fcpreq = NULL; |
| 2076 | memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); |
| 2077 | iod->rqstdatalen = lsreqbuf_len; |
| 2078 | iod->hosthandle = hosthandle; |
| 2079 | |
| 2080 | queue_work(nvmet_wq, &iod->work);
| 2081 | |
| 2082 | return 0; |
| 2083 | } |
| 2084 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); |
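|  | 
|  | /*
|  |  * Illustrative sketch only: how an LLDD's LS receive path might hand a
|  |  * frame to nvmet-fc. "example_lport" and
|  |  * "example_lldd_abort_ls_exchange()" are hypothetical names.
|  |  */
|  | #if 0
|  | static void example_lldd_recv_ls(struct example_lport *lport,
|  | 		void *hosthandle, struct nvmefc_ls_rsp *lsrsp,
|  | 		void *payload, u32 payload_len)
|  | {
|  | 	int ret;
|  | 
|  | 	ret = nvmet_fc_rcv_ls_req(lport->targetport, hosthandle,
|  | 				  lsrsp, payload, payload_len);
|  | 	if (ret)
|  | 		/* transport did not take the LS - abort the exchange */
|  | 		example_lldd_abort_ls_exchange(lport, lsrsp);
|  | 	/* on success the payload buffer may be reused immediately */
|  | }
|  | #endif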
| 2085 | |
| 2086 | |
| 2087 | /* |
| 2088 | * ********************** |
| 2089 | * Start of FCP handling |
| 2090 | * ********************** |
| 2091 | */ |
| 2092 | |
| 2093 | static int |
| 2094 | nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) |
| 2095 | { |
| 2096 | struct scatterlist *sg; |
| 2097 | unsigned int nent; |
| 2098 | |
| 2099 | sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
| 2100 | if (!sg) |
| 2101 | goto out; |
| 2102 | |
| 2103 | fod->data_sg = sg; |
| 2104 | fod->data_sg_cnt = nent; |
| 2105 | fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
| 2106 | ((fod->io_dir == NVMET_FCP_WRITE) ?
| 2107 | DMA_FROM_DEVICE : DMA_TO_DEVICE));
| 2108 | /* note: write from initiator perspective */ |
| 2109 | fod->next_sg = fod->data_sg; |
| 2110 | |
| 2111 | return 0; |
| 2112 | |
| 2113 | out: |
| 2114 | return NVME_SC_INTERNAL; |
| 2115 | } |
| 2116 | |
| 2117 | static void |
| 2118 | nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) |
| 2119 | { |
| 2120 | if (!fod->data_sg || !fod->data_sg_cnt) |
| 2121 | return; |
| 2122 | |
| 2123 | fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
| 2124 | ((fod->io_dir == NVMET_FCP_WRITE) ?
| 2125 | DMA_FROM_DEVICE : DMA_TO_DEVICE));
| 2126 | sgl_free(fod->data_sg);
| 2127 | fod->data_sg = NULL; |
| 2128 | fod->data_sg_cnt = 0; |
| 2129 | } |
| 2130 | |
| 2131 | |
| 2132 | static bool |
| 2133 | queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) |
| 2134 | { |
| 2135 | u32 sqtail, used; |
| 2136 | |
| 2137 | /* egad, this is ugly. And sqtail is just a best guess */ |
| 2138 | sqtail = atomic_read(&q->sqtail) % q->sqsize;
| 2139 | |
| 2140 | used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); |
| 2141 | return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); |
| 2142 | } |
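|  | 
|  | /*
|  |  * Worked example of the check above: with sqsize = 32, sqhd = 2 and a
|  |  * sampled sqtail of 31, used = 31 - 2 = 29; since 29 * 10 = 290 is >=
|  |  * (32 - 1) * 9 = 279, the queue is treated as 90% full and an ERSP
|  |  * will be forced for the response.
|  |  */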
| 2143 | |
| 2144 | /* |
| 2145 | * Prep RSP payload. |
| 2146 | * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op |
| 2147 | */ |
| 2148 | static void |
| 2149 | nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
| 2150 | struct nvmet_fc_fcp_iod *fod) |
| 2151 | { |
| 2152 | struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; |
| 2153 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
| 2154 | struct nvme_completion *cqe = &ersp->cqe; |
| 2155 | u32 *cqewd = (u32 *)cqe; |
| 2156 | bool send_ersp = false; |
| 2157 | u32 rsn, rspcnt, xfr_length; |
| 2158 | |
| 2159 | if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) |
| 2160 | xfr_length = fod->req.transfer_len; |
| 2161 | else |
| 2162 | xfr_length = fod->offset; |
| 2163 | |
| 2164 | /* |
| 2165 | * check to see if we can send a 0's rsp. |
| 2166 | * Note: to send a 0's response, the NVME-FC host transport will |
| 2167 | * recreate the CQE. The host transport knows: sq id, SQHD (last |
| 2168 | * seen in an ersp), and command_id. Thus it will create a |
| 2169 | * zero-filled CQE with those known fields filled in. Transport |
| 2170 | * must send an ersp for any condition where the cqe won't match |
| 2171 | * this. |
| 2172 | * |
| 2173 | * Here are the FC-NVME mandated cases where we must send an ersp: |
| 2174 | * every N responses, where N=ersp_ratio |
| 2175 | * force fabric commands to send ersp's (not in FC-NVME but good |
| 2176 | * practice) |
| 2177 | * normal cmds: any time status is non-zero, or status is zero |
| 2178 | * but words 0 or 1 are non-zero. |
| 2179 | * the SQ is 90% or more full |
| 2180 | * the cmd is a fused command |
| 2181 | * transferred data length not equal to cmd iu length |
| 2182 | */ |
| 2183 | rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
| 2184 | if (!(rspcnt % fod->queue->ersp_ratio) ||
| 2185 | nvme_is_fabrics((struct nvme_command *) sqe) ||
| 2186 | xfr_length != fod->req.transfer_len ||
| 2187 | (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
| 2188 | (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
| 2189 | queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
| 2190 | send_ersp = true; |
| 2191 | |
| 2192 | /* re-set the fields */ |
| 2193 | fod->fcpreq->rspaddr = ersp; |
| 2194 | fod->fcpreq->rspdma = fod->rspdma; |
| 2195 | |
| 2196 | if (!send_ersp) { |
| 2197 | memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); |
| 2198 | fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; |
| 2199 | } else { |
| 2200 | ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); |
| 2201 | rsn = atomic_inc_return(&fod->queue->rsn);
| 2202 | ersp->rsn = cpu_to_be32(rsn); |
| 2203 | ersp->xfrd_len = cpu_to_be32(xfr_length); |
| 2204 | fod->fcpreq->rsplen = sizeof(*ersp); |
| 2205 | } |
| 2206 | |
| 2207 | fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
| 2208 | sizeof(fod->rspiubuf), DMA_TO_DEVICE);
| 2209 | } |
| 2210 | |
| 2211 | static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); |
| 2212 | |
| 2213 | static void |
| 2214 | nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, |
| 2215 | struct nvmet_fc_fcp_iod *fod) |
| 2216 | { |
| 2217 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
| 2218 | |
| 2219 | /* data no longer needed */ |
| 2220 | nvmet_fc_free_tgt_pgs(fod); |
| 2221 | |
| 2222 | /* |
| 2223 | * if an ABTS was received or we issued the fcp_abort early |
| 2224 | * don't call abort routine again. |
| 2225 | */ |
| 2226 | /* no need to take lock - lock was taken earlier to get here */ |
| 2227 | if (!fod->aborted) |
| 2228 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); |
| 2229 | |
| 2230 | nvmet_fc_free_fcp_iod(fod->queue, fod);
| 2231 | } |
| 2232 | |
| 2233 | static void |
| 2234 | nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
| 2235 | struct nvmet_fc_fcp_iod *fod) |
| 2236 | { |
| 2237 | int ret; |
| 2238 | |
| 2239 | fod->fcpreq->op = NVMET_FCOP_RSP; |
| 2240 | fod->fcpreq->timeout = 0; |
| 2241 | |
| 2242 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
| 2243 | |
| 2244 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
| 2245 | if (ret) |
| 2246 | nvmet_fc_abort_op(tgtport, fod); |
| 2247 | } |
| 2248 | |
| 2249 | static void |
| 2250 | nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, |
| 2251 | struct nvmet_fc_fcp_iod *fod, u8 op) |
| 2252 | { |
| 2253 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
| 2254 | struct scatterlist *sg = fod->next_sg; |
| 2255 | unsigned long flags; |
| 2256 | u32 remaininglen = fod->req.transfer_len - fod->offset; |
| 2257 | u32 tlen = 0; |
| 2258 | int ret; |
| 2259 | |
| 2260 | fcpreq->op = op; |
| 2261 | fcpreq->offset = fod->offset; |
| 2262 | fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; |
| 2263 | |
| 2264 | /* |
| 2265 | * for next sequence: |
| 2266 | * break at a sg element boundary |
| 2267 | * attempt to keep sequence length capped at |
| 2268 | * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to |
| 2269 | * be longer if a single sg element is larger |
| 2270 | * than that amount. This is done to avoid creating |
| 2271 | * a new sg list to use for the tgtport api. |
| 2272 | */ |
| 2273 | fcpreq->sg = sg; |
| 2274 | fcpreq->sg_cnt = 0; |
| 2275 | while (tlen < remaininglen && |
| 2276 | fcpreq->sg_cnt < tgtport->max_sg_cnt && |
| 2277 | tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { |
| 2278 | fcpreq->sg_cnt++; |
| 2279 | tlen += sg_dma_len(sg); |
| 2280 | sg = sg_next(sg); |
| 2281 | } |
| 2282 | if (tlen < remaininglen && fcpreq->sg_cnt == 0) { |
| 2283 | fcpreq->sg_cnt++; |
| 2284 | tlen += min_t(u32, sg_dma_len(sg), remaininglen); |
| 2285 | sg = sg_next(sg); |
| 2286 | } |
| 2287 | if (tlen < remaininglen) |
| 2288 | fod->next_sg = sg; |
| 2289 | else |
| 2290 | fod->next_sg = NULL; |
| 2291 | |
| 2292 | fcpreq->transfer_length = tlen; |
| 2293 | fcpreq->transferred_length = 0; |
| 2294 | fcpreq->fcp_error = 0; |
| 2295 | fcpreq->rsplen = 0; |
| 2296 | |
| 2297 | /* |
| 2298 | * If the last READDATA request: check if LLDD supports |
| 2299 | * combined xfr with response. |
| 2300 | */ |
| 2301 | if ((op == NVMET_FCOP_READDATA) && |
| 2302 | ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && |
| 2303 | (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { |
| 2304 | fcpreq->op = NVMET_FCOP_READDATA_RSP; |
| 2305 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
| 2306 | } |
| 2307 | |
| 2308 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
| 2309 | if (ret) { |
| 2310 | /* |
| 2311 | * should be ok to set w/o lock as it's in the thread of |
| 2312 | * execution (not an async timer routine) and doesn't |
| 2313 | * contend with any clearing action |
| 2314 | */ |
| 2315 | fod->abort = true; |
| 2316 | |
| 2317 | if (op == NVMET_FCOP_WRITEDATA) { |
| 2318 | spin_lock_irqsave(&fod->flock, flags); |
| 2319 | fod->writedataactive = false; |
| 2320 | spin_unlock_irqrestore(&fod->flock, flags);
| 2321 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
| 2322 | } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { |
| 2323 | fcpreq->fcp_error = ret; |
| 2324 | fcpreq->transferred_length = 0; |
| 2325 | nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
| 2326 | } |
| 2327 | } |
| 2328 | } |
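|  | 
|  | /*
|  |  * Worked example of the chunking above (illustrative numbers): for a
|  |  * 1 MB transfer presented as four 256 KB sg elements with
|  |  * NVMET_FC_MAX_SEQ_LENGTH at 256 KB, the first loop adds no element
|  |  * (0 + 256 KB is not < 256 KB), the fallback then takes exactly one,
|  |  * and the transfer proceeds as four data operations of 256 KB each.
|  |  */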
| 2329 | |
| 2330 | static inline bool |
| 2331 | __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) |
| 2332 | { |
| 2333 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
| 2334 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 2335 | |
| 2336 | /* if in the middle of an io and we need to tear down */ |
| 2337 | if (abort) { |
| 2338 | if (fcpreq->op == NVMET_FCOP_WRITEDATA) { |
| 2339 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
| 2340 | return true; |
| 2341 | } |
| 2342 | |
| 2343 | nvmet_fc_abort_op(tgtport, fod); |
| 2344 | return true; |
| 2345 | } |
| 2346 | |
| 2347 | return false; |
| 2348 | } |
| 2349 | |
| 2350 | /* |
| 2351 | * actual done handler for FCP operations when completed by the lldd |
| 2352 | */ |
| 2353 | static void |
| 2354 | nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) |
| 2355 | { |
| 2356 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
| 2357 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 2358 | unsigned long flags; |
| 2359 | bool abort; |
| 2360 | |
| 2361 | spin_lock_irqsave(&fod->flock, flags); |
| 2362 | abort = fod->abort; |
| 2363 | fod->writedataactive = false; |
| 2364 | spin_unlock_irqrestore(&fod->flock, flags);
| 2365 | |
| 2366 | switch (fcpreq->op) { |
| 2367 | |
| 2368 | case NVMET_FCOP_WRITEDATA: |
| 2369 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
| 2370 | return; |
| 2371 | if (fcpreq->fcp_error || |
| 2372 | fcpreq->transferred_length != fcpreq->transfer_length) { |
| 2373 | spin_lock_irqsave(&fod->flock, flags); |
| 2374 | fod->abort = true; |
| 2375 | spin_unlock_irqrestore(&fod->flock, flags);
| 2376 | |
| 2377 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
| 2378 | return; |
| 2379 | } |
| 2380 | |
| 2381 | fod->offset += fcpreq->transferred_length; |
| 2382 | if (fod->offset != fod->req.transfer_len) { |
| 2383 | spin_lock_irqsave(&fod->flock, flags); |
| 2384 | fod->writedataactive = true; |
| 2385 | spin_unlock_irqrestore(&fod->flock, flags);
| 2386 | |
| 2387 | /* transfer the next chunk */ |
| 2388 | nvmet_fc_transfer_fcp_data(tgtport, fod,
| 2389 | NVMET_FCOP_WRITEDATA);
| 2390 | return; |
| 2391 | } |
| 2392 | |
| 2393 | /* data transfer complete, resume with nvmet layer */ |
| 2394 | fod->req.execute(&fod->req); |
| 2395 | break; |
| 2396 | |
| 2397 | case NVMET_FCOP_READDATA: |
| 2398 | case NVMET_FCOP_READDATA_RSP: |
| 2399 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
| 2400 | return; |
| 2401 | if (fcpreq->fcp_error || |
| 2402 | fcpreq->transferred_length != fcpreq->transfer_length) { |
| 2403 | nvmet_fc_abort_op(tgtport, fod); |
| 2404 | return; |
| 2405 | } |
| 2406 | |
| 2407 | /* success */ |
| 2408 | |
| 2409 | if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { |
| 2410 | /* data no longer needed */ |
| 2411 | nvmet_fc_free_tgt_pgs(fod); |
| 2412 | nvmet_fc_free_fcp_iod(fod->queue, fod);
| 2413 | return; |
| 2414 | } |
| 2415 | |
| 2416 | fod->offset += fcpreq->transferred_length; |
| 2417 | if (fod->offset != fod->req.transfer_len) { |
| 2418 | /* transfer the next chunk */ |
| 2419 | nvmet_fc_transfer_fcp_data(tgtport, fod,
| 2420 | NVMET_FCOP_READDATA);
| 2421 | return; |
| 2422 | } |
| 2423 | |
| 2424 | /* data transfer complete, send response */ |
| 2425 | |
| 2426 | /* data no longer needed */ |
| 2427 | nvmet_fc_free_tgt_pgs(fod); |
| 2428 | |
| 2429 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
| 2430 | |
| 2431 | break; |
| 2432 | |
| 2433 | case NVMET_FCOP_RSP: |
| 2434 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
| 2435 | return; |
| 2436 | nvmet_fc_free_fcp_iod(fod->queue, fod);
| 2437 | break; |
| 2438 | |
| 2439 | default: |
| 2440 | break; |
| 2441 | } |
| 2442 | } |
| 2443 | |
| 2444 | static void |
| 2445 | nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) |
| 2446 | { |
| 2447 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
| 2448 | |
| 2449 | nvmet_fc_fod_op_done(fod); |
| 2450 | } |
| 2451 | |
| 2452 | /* |
| 2453 | * actual completion handler after execution by the nvmet layer |
| 2454 | */ |
| 2455 | static void |
| 2456 | __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, |
| 2457 | struct nvmet_fc_fcp_iod *fod, int status) |
| 2458 | { |
| 2459 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
| 2460 | struct nvme_completion *cqe = &fod->rspiubuf.cqe; |
| 2461 | unsigned long flags; |
| 2462 | bool abort; |
| 2463 | |
| 2464 | spin_lock_irqsave(&fod->flock, flags); |
| 2465 | abort = fod->abort; |
| 2466 | spin_unlock_irqrestore(&fod->flock, flags);
| 2467 | |
| 2468 | /* if we have a CQE, snoop the last sq_head value */ |
| 2469 | if (!status) |
| 2470 | fod->queue->sqhd = cqe->sq_head; |
| 2471 | |
| 2472 | if (abort) { |
| 2473 | nvmet_fc_abort_op(tgtport, fod); |
| 2474 | return; |
| 2475 | } |
| 2476 | |
| 2477 | /* if an error handling the cmd post initial parsing */ |
| 2478 | if (status) { |
| 2479 | /* fudge up a failed CQE status for our transport error */ |
| 2480 | memset(cqe, 0, sizeof(*cqe)); |
| 2481 | cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ |
| 2482 | cqe->sq_id = cpu_to_le16(fod->queue->qid); |
| 2483 | cqe->command_id = sqe->command_id; |
| 2484 | cqe->status = cpu_to_le16(status); |
| 2485 | } else { |
| 2486 | |
| 2487 | /* |
| 2488 | * try to push the data even if the SQE status is non-zero. |
| 2489 | * There may be a status where data still was intended to |
| 2490 | * be moved |
| 2491 | */ |
| 2492 | if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { |
| 2493 | /* push the data over before sending rsp */ |
| 2494 | nvmet_fc_transfer_fcp_data(tgtport, fod,
| 2495 | NVMET_FCOP_READDATA);
| 2496 | return; |
| 2497 | } |
| 2498 | |
| 2499 | /* writes & no data - fall thru */ |
| 2500 | } |
| 2501 | |
| 2502 | /* data no longer needed */ |
| 2503 | nvmet_fc_free_tgt_pgs(fod); |
| 2504 | |
| 2505 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
| 2506 | } |
| 2507 | |
| 2508 | |
| 2509 | static void |
| 2510 | nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) |
| 2511 | { |
| 2512 | struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); |
| 2513 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 2514 | |
| 2515 | __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
| 2516 | } |
| 2517 | |
| 2518 | |
| 2519 | /* |
| 2520 | * Actual processing routine for received FC-NVME I/O Requests from the LLD |
| 2521 | */ |
| 2522 | static void |
| 2523 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
| 2524 | struct nvmet_fc_fcp_iod *fod) |
| 2525 | { |
| 2526 | struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; |
| 2527 | u32 xfrlen = be32_to_cpu(cmdiu->data_len); |
| 2528 | int ret; |
| 2529 | |
| 2530 | /* |
| 2531 | * Fused commands are currently not supported in the Linux
| 2532 | * implementation.
| 2533 | *
| 2534 | * As such, the FC transport implementation does not inspect
| 2535 | * fused commands nor hold delivery to the upper layer, based
| 2536 | * on CSN, until both commands have been received.
| 2537 | */ |
| 2538 | |
| 2539 | fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; |
| 2540 | |
| 2541 | if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { |
| 2542 | fod->io_dir = NVMET_FCP_WRITE; |
| 2543 | if (!nvme_is_write(&cmdiu->sqe))
| 2544 | goto transport_error; |
| 2545 | } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { |
| 2546 | fod->io_dir = NVMET_FCP_READ; |
| 2547 | if (nvme_is_write(&cmdiu->sqe))
| 2548 | goto transport_error; |
| 2549 | } else { |
| 2550 | fod->io_dir = NVMET_FCP_NODATA; |
| 2551 | if (xfrlen) |
| 2552 | goto transport_error; |
| 2553 | } |
| 2554 | |
| 2555 | fod->req.cmd = &fod->cmdiubuf.sqe; |
| 2556 | fod->req.cqe = &fod->rspiubuf.cqe; |
| 2557 | if (!tgtport->pe) |
| 2558 | goto transport_error; |
| 2559 | fod->req.port = tgtport->pe->port; |
| 2560 | |
| 2561 | /* clear any response payload */ |
| 2562 | memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); |
| 2563 | |
| 2564 | fod->data_sg = NULL; |
| 2565 | fod->data_sg_cnt = 0; |
| 2566 | |
| 2567 | ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq,
| 2568 | &nvmet_fc_tgt_fcp_ops);
| 2569 | if (!ret) { |
| 2570 | /* bad SQE content or invalid ctrl state */ |
| 2571 | /* nvmet layer has already called op done to send rsp. */ |
| 2572 | return; |
| 2573 | } |
| 2574 | |
| 2575 | fod->req.transfer_len = xfrlen; |
| 2576 | |
| 2577 | /* keep a running counter of tail position */ |
| 2578 | atomic_inc(&fod->queue->sqtail);
| 2579 | |
| 2580 | if (fod->req.transfer_len) { |
| 2581 | ret = nvmet_fc_alloc_tgt_pgs(fod); |
| 2582 | if (ret) { |
| 2583 | nvmet_req_complete(&fod->req, ret);
| 2584 | return; |
| 2585 | } |
| 2586 | } |
| 2587 | fod->req.sg = fod->data_sg; |
| 2588 | fod->req.sg_cnt = fod->data_sg_cnt; |
| 2589 | fod->offset = 0; |
| 2590 | |
| 2591 | if (fod->io_dir == NVMET_FCP_WRITE) { |
| 2592 | /* pull the data over before invoking nvmet layer */ |
| 2593 | nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
| 2594 | return; |
| 2595 | } |
| 2596 | |
| 2597 | /* |
| 2598 | * Reads or no data: |
| 2599 | * |
| 2600 | * can invoke the nvmet layer now. If there is read data, the cmd
| 2601 | * completion will push the data
| 2602 | */ |
| 2603 | fod->req.execute(&fod->req); |
| 2604 | return; |
| 2605 | |
| 2606 | transport_error: |
| 2607 | nvmet_fc_abort_op(tgtport, fod); |
| 2608 | } |
| 2609 | |
| 2610 | /** |
| 2611 | * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD |
| 2612 | * upon the reception of a NVME FCP CMD IU. |
| 2613 | * |
| 2614 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc |
| 2615 | * layer for processing. |
| 2616 | * |
| 2617 | * The nvmet_fc layer allocates a local job structure (struct |
| 2618 | * nvmet_fc_fcp_iod) from the queue for the io and copies the |
| 2619 | * CMD IU buffer to the job structure. As such, on a successful |
| 2620 | * completion (returns 0), the LLDD may immediately free/reuse |
| 2621 | * the CMD IU buffer passed in the call. |
| 2622 | * |
| 2623 | * However, due to the packetized nature of FC and the FC LLDD api,
| 2624 | * the LLDD may issue a hw command to send the response but not yet
| 2625 | * see the hw completion for that command (and upcall the nvmet_fc
| 2626 | * layer) before a new command is asynchronously received. Thus a
| 2627 | * command may arrive before the LLDD and nvmet_fc have recycled the
| 2628 | * job structure, giving the appearance of more commands received
| 2629 | * than fit in the sq.
| 2630 | * To alleviate this scenario, a temporary queue is maintained in the |
| 2631 | * transport for pending LLDD requests waiting for a queue job structure. |
| 2632 | * In these "overrun" cases, a temporary queue element is allocated |
| 2633 | * the LLDD request and CMD iu buffer information remembered, and the |
| 2634 | * routine returns a -EOVERFLOW status. Subsequently, when a queue job |
| 2635 | * structure is freed, it is immediately reallocated for anything on the |
| 2636 | * pending request list. The LLDDs defer_rcv() callback is called, |
| 2637 | * informing the LLDD that it may reuse the CMD IU buffer, and the io |
| 2638 | * is then started normally with the transport. |
| 2639 | * |
| 2640 | * The LLDD, when receiving an -EOVERFLOW completion status, is to treat |
| 2641 | * the completion as successful but must not reuse the CMD IU buffer |
| 2642 | * until the LLDD's defer_rcv() callback has been called for the |
| 2643 | * corresponding struct nvmefc_tgt_fcp_req pointer. |
| 2644 | * |
| 2645 | * If there is any other condition in which an error occurs, the |
| 2646 | * transport will return a non-zero status indicating the error. |
| 2647 | * In all cases other than -EOVERFLOW, the transport has not accepted the |
| 2648 | * request and the LLDD should abort the exchange. |
| 2649 | * |
| 2650 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
| 2651 | * was received on. |
| 2652 | * @fcpreq: pointer to a fcpreq request structure to be used to reference |
| 2653 | * the exchange corresponding to the FCP Exchange. |
| 2654 | * @cmdiubuf: pointer to the buffer containing the FCP CMD IU |
| 2655 | * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU |
| 2656 | */ |
| 2657 | int |
| 2658 | nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, |
| 2659 | struct nvmefc_tgt_fcp_req *fcpreq, |
| 2660 | void *cmdiubuf, u32 cmdiubuf_len) |
| 2661 | { |
| 2662 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
| 2663 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; |
| 2664 | struct nvmet_fc_tgt_queue *queue; |
| 2665 | struct nvmet_fc_fcp_iod *fod; |
| 2666 | struct nvmet_fc_defer_fcp_req *deferfcp; |
| 2667 | unsigned long flags; |
| 2668 | |
| 2669 | /* validate iu, so the connection id can be used to find the queue */ |
| 2670 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
| 2671 | (cmdiu->format_id != NVME_CMD_FORMAT_ID) || |
| 2672 | (cmdiu->fc_id != NVME_CMD_FC_ID) || |
| 2673 | (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) |
| 2674 | return -EIO; |
| 2675 | |
| 2676 | queue = nvmet_fc_find_target_queue(tgtport, |
| 2677 | be64_to_cpu(cmdiu->connection_id)); |
| 2678 | if (!queue) |
| 2679 | return -ENOTCONN; |
| 2680 | |
| 2681 | /* |
| 2682 | * note: reference taken by find_target_queue |
| 2683 | * After successful fod allocation, the fod will inherit the |
| 2684 | * ownership of that reference and will remove the reference |
| 2685 | * when the fod is freed. |
| 2686 | */ |
| 2687 | |
| 2688 | spin_lock_irqsave(&queue->qlock, flags); |
| 2689 | |
| 2690 | fod = nvmet_fc_alloc_fcp_iod(queue); |
| 2691 | if (fod) { |
| 2692 | spin_unlock_irqrestore(&queue->qlock, flags);
| 2693 | |
| 2694 | fcpreq->nvmet_fc_private = fod; |
| 2695 | fod->fcpreq = fcpreq; |
| 2696 | |
| 2697 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); |
| 2698 | |
| 2699 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); |
| 2700 | |
| 2701 | return 0; |
| 2702 | } |
| 2703 | |
| 2704 | if (!tgtport->ops->defer_rcv) { |
| 2705 | spin_unlock_irqrestore(&queue->qlock, flags);
| 2706 | /* release the queue lookup reference */ |
| 2707 | nvmet_fc_tgt_q_put(queue); |
| 2708 | return -ENOENT; |
| 2709 | } |
| 2710 | |
| 2711 | deferfcp = list_first_entry_or_null(&queue->avail_defer_list, |
| 2712 | struct nvmet_fc_defer_fcp_req, req_list); |
| 2713 | if (deferfcp) { |
| 2714 | /* Just re-use one that was previously allocated */ |
| 2715 | list_del(&deferfcp->req_list);
| 2716 | } else { |
| 2717 | spin_unlock_irqrestore(&queue->qlock, flags);
| 2718 | |
| 2719 | /* Now we need to dynamically allocate one */ |
| 2720 | deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); |
| 2721 | if (!deferfcp) { |
| 2722 | /* release the queue lookup reference */ |
| 2723 | nvmet_fc_tgt_q_put(queue); |
| 2724 | return -ENOMEM; |
| 2725 | } |
| 2726 | spin_lock_irqsave(&queue->qlock, flags); |
| 2727 | } |
| 2728 | |
| 2729 | /* For now, use rspaddr / rsplen to save payload information */ |
| 2730 | fcpreq->rspaddr = cmdiubuf; |
| 2731 | fcpreq->rsplen = cmdiubuf_len; |
| 2732 | deferfcp->fcp_req = fcpreq; |
| 2733 | |
| 2734 | /* defer processing till a fod becomes available */ |
| 2735 | list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
| 2736 | |
| 2737 | /* NOTE: the queue lookup reference is still valid */ |
| 2738 | |
| 2739 | spin_unlock_irqrestore(&queue->qlock, flags);
| 2740 | |
| 2741 | return -EOVERFLOW; |
| 2742 | } |
| 2743 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); |
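|  | 
|  | /*
|  |  * Illustrative sketch only: an LLDD FCP CMD receive path honoring the
|  |  * -EOVERFLOW contract described above. "example_lport" and
|  |  * "example_lldd_abort_fcp_exchange()" are hypothetical names.
|  |  */
|  | #if 0
|  | static void example_lldd_recv_fcp_cmd(struct example_lport *lport,
|  | 		struct nvmefc_tgt_fcp_req *fcpreq,
|  | 		void *cmdiu, u32 cmdiu_len)
|  | {
|  | 	int ret;
|  | 
|  | 	ret = nvmet_fc_rcv_fcp_req(lport->targetport, fcpreq,
|  | 				   cmdiu, cmdiu_len);
|  | 	switch (ret) {
|  | 	case 0:
|  | 		/* accepted - the CMD IU buffer may be reused right away */
|  | 		break;
|  | 	case -EOVERFLOW:
|  | 		/*
|  | 		 * accepted but queued - keep the CMD IU buffer intact
|  | 		 * until defer_rcv() is called for this fcpreq.
|  | 		 */
|  | 		break;
|  | 	default:
|  | 		/* not accepted - abort the exchange */
|  | 		example_lldd_abort_fcp_exchange(lport, fcpreq);
|  | 		break;
|  | 	}
|  | }
|  | #endif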
| 2744 | |
| 2745 | /** |
| 2746 | * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD |
| 2747 | * upon the reception of an ABTS for a FCP command |
| 2748 | * |
| 2749 | * Notify the transport that an ABTS has been received for a FCP command |
| 2750 | * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The |
| 2751 | * LLDD believes the command is still being worked on |
| 2752 | * (template_ops->fcp_req_release() has not been called). |
| 2753 | * |
| 2754 | * The transport will wait for any outstanding work (an op to the LLDD, |
| 2755 | * which the lldd should complete with error due to the ABTS; or the |
| 2756 | * completion from the nvmet layer of the nvme command), then will |
| 2757 | * stop processing and call the LLDD's fcp_req_release() callback to
| 2758 | * return the i/o context to the LLDD. The LLDD may send the BA_ACC |
| 2759 | * to the ABTS either after return from this function (assuming any |
| 2760 | * outstanding op work has been terminated) or upon the callback being |
| 2761 | * called. |
| 2762 | * |
| 2763 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
| 2764 | * was received on. |
| 2765 | * @fcpreq: pointer to the fcpreq request structure that corresponds |
| 2766 | * to the exchange that received the ABTS. |
| 2767 | */ |
| 2768 | void |
| 2769 | nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, |
| 2770 | struct nvmefc_tgt_fcp_req *fcpreq) |
| 2771 | { |
| 2772 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
| 2773 | struct nvmet_fc_tgt_queue *queue; |
| 2774 | unsigned long flags; |
| 2775 | |
| 2776 | if (!fod || fod->fcpreq != fcpreq) |
| 2777 | /* job appears to have already completed, ignore abort */ |
| 2778 | return; |
| 2779 | |
| 2780 | queue = fod->queue; |
| 2781 | |
| 2782 | spin_lock_irqsave(&queue->qlock, flags); |
| 2783 | if (fod->active) { |
| 2784 | /* |
| 2785 | * mark as abort. The abort handler, invoked upon completion |
| 2786 | * of any work, will detect the aborted status and do the |
| 2787 | * callback. |
| 2788 | */ |
| 2789 | spin_lock(&fod->flock);
| 2790 | fod->abort = true;
| 2791 | fod->aborted = true;
| 2792 | spin_unlock(&fod->flock);
| 2793 | } |
| 2794 | spin_unlock_irqrestore(&queue->qlock, flags);
| 2795 | } |
| 2796 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); |
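|  | 
|  | /*
|  |  * Illustrative sketch only: an LLDD notifying the transport of an ABTS.
|  |  * "example_lport" and "example_lldd_send_ba_acc()" are hypothetical.
|  |  */
|  | #if 0
|  | static void example_lldd_recv_abts(struct example_lport *lport,
|  | 		struct nvmefc_tgt_fcp_req *fcpreq)
|  | {
|  | 	nvmet_fc_rcv_fcp_abort(lport->targetport, fcpreq);
|  | 	/*
|  | 	 * The BA_ACC may be sent now, or deferred until fcp_req_release()
|  | 	 * hands the exchange context back to the LLDD.
|  | 	 */
|  | 	example_lldd_send_ba_acc(lport, fcpreq);
|  | }
|  | #endif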
| 2797 | |
| 2798 | |
| 2799 | struct nvmet_fc_traddr { |
| 2800 | u64 nn; |
| 2801 | u64 pn; |
| 2802 | }; |
| 2803 | |
| 2804 | static int |
| 2805 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
| 2806 | { |
| 2807 | u64 token64; |
| 2808 | |
| 2809 | if (match_u64(sstr, &token64))
| 2810 | return -EINVAL; |
| 2811 | *val = token64; |
| 2812 | |
| 2813 | return 0; |
| 2814 | } |
| 2815 | |
| 2816 | /* |
| 2817 | * This routine validates and extracts the WWN's from the TRADDR string. |
| 2818 | * As kernel parsers need the 0x to determine number base, universally |
| 2819 | * build string to parse with 0x prefix before parsing name strings. |
| 2820 | */ |
| 2821 | static int |
| 2822 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) |
| 2823 | { |
| 2824 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; |
| 2825 | substring_t wwn = { name, &name[sizeof(name)-1] }; |
| 2826 | int nnoffset, pnoffset; |
| 2827 | |
| 2828 | /* validate if string is one of the 2 allowed formats */ |
| 2829 | if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
| 2830 | !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
| 2831 | !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
| 2832 | "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
| 2833 | nnoffset = NVME_FC_TRADDR_OXNNLEN;
| 2834 | pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
| 2835 | NVME_FC_TRADDR_OXNNLEN;
| 2836 | } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
| 2837 | !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
| 2838 | !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
| 2839 | "pn-", NVME_FC_TRADDR_NNLEN))) {
| 2840 | nnoffset = NVME_FC_TRADDR_NNLEN; |
| 2841 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; |
| 2842 | } else |
| 2843 | goto out_einval; |
| 2844 | |
| 2845 | name[0] = '0'; |
| 2846 | name[1] = 'x'; |
| 2847 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; |
| 2848 | |
| 2849 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); |
| 2850 | if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
| 2851 | goto out_einval; |
| 2852 | |
| 2853 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); |
| 2854 | if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
| 2855 | goto out_einval; |
| 2856 | |
| 2857 | return 0; |
| 2858 | |
| 2859 | out_einval: |
| 2860 | pr_warn("%s: bad traddr string\n" , __func__); |
| 2861 | return -EINVAL; |
| 2862 | } |
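|  | 
|  | /*
|  |  * Example traddr strings accepted above (the WWNs are placeholders);
|  |  * both forms parse to the same nn/pn values:
|  |  *   "nn-0x20000090fa942779:pn-0x10000090fa942779"  (0x-prefixed form)
|  |  *   "nn-20000090fa942779:pn-10000090fa942779"      (short form)
|  |  */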
| 2863 | |
| 2864 | static int |
| 2865 | nvmet_fc_add_port(struct nvmet_port *port) |
| 2866 | { |
| 2867 | struct nvmet_fc_tgtport *tgtport; |
| 2868 | struct nvmet_fc_port_entry *pe; |
| 2869 | struct nvmet_fc_traddr traddr = { 0L, 0L }; |
| 2870 | unsigned long flags; |
| 2871 | int ret; |
| 2872 | |
| 2873 | /* validate the address info */ |
| 2874 | if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || |
| 2875 | (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) |
| 2876 | return -EINVAL; |
| 2877 | |
| 2878 | /* map the traddr address info to a target port */ |
| 2879 | |
| 2880 | ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
| 2881 | sizeof(port->disc_addr.traddr));
| 2882 | if (ret) |
| 2883 | return ret; |
| 2884 | |
| 2885 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); |
| 2886 | if (!pe) |
| 2887 | return -ENOMEM; |
| 2888 | |
| 2889 | ret = -ENXIO; |
| 2890 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 2891 | list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { |
| 2892 | if ((tgtport->fc_target_port.node_name == traddr.nn) && |
| 2893 | (tgtport->fc_target_port.port_name == traddr.pn)) { |
| 2894 | if (!nvmet_fc_tgtport_get(tgtport)) |
| 2895 | continue; |
| 2896 | |
| 2897 | /* an FC port can map to only one nvmet port id */
| 2898 | if (!tgtport->pe) { |
| 2899 | nvmet_fc_portentry_bind(tgtport, pe, port); |
| 2900 | ret = 0; |
| 2901 | } else |
| 2902 | ret = -EALREADY; |
| 2903 | |
| 2904 | nvmet_fc_tgtport_put(tgtport); |
| 2905 | break; |
| 2906 | } |
| 2907 | } |
| 2908 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
| 2909 | |
| 2910 | if (ret) |
| 2911 | kfree(pe);
| 2912 | |
| 2913 | return ret; |
| 2914 | } |
| 2915 | |
| 2916 | static void |
| 2917 | nvmet_fc_remove_port(struct nvmet_port *port) |
| 2918 | { |
| 2919 | struct nvmet_fc_port_entry *pe = port->priv; |
| 2920 | struct nvmet_fc_tgtport *tgtport = NULL; |
| 2921 | unsigned long flags; |
| 2922 | |
| 2923 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 2924 | if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
| 2925 | tgtport = pe->tgtport; |
| 2926 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
| 2927 | |
| 2928 | nvmet_fc_portentry_unbind(pe); |
| 2929 | |
| 2930 | if (tgtport) { |
| 2931 | /* terminate any outstanding associations */ |
| 2932 | __nvmet_fc_free_assocs(tgtport); |
| 2933 | nvmet_fc_tgtport_put(tgtport); |
| 2934 | } |
| 2935 | |
| 2936 | kfree(pe);
| 2937 | } |
| 2938 | |
| 2939 | static void |
| 2940 | nvmet_fc_discovery_chg(struct nvmet_port *port) |
| 2941 | { |
| 2942 | struct nvmet_fc_port_entry *pe = port->priv; |
| 2943 | struct nvmet_fc_tgtport *tgtport = NULL; |
| 2944 | unsigned long flags; |
| 2945 | |
| 2946 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
| 2947 | if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
| 2948 | tgtport = pe->tgtport; |
| 2949 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
| 2950 | |
| 2951 | if (!tgtport) |
| 2952 | return; |
| 2953 | |
| 2954 | if (tgtport->ops->discovery_event)
| 2955 | tgtport->ops->discovery_event(&tgtport->fc_target_port); |
| 2956 | |
| 2957 | nvmet_fc_tgtport_put(tgtport); |
| 2958 | } |
| 2959 | |
| 2960 | static ssize_t |
| 2961 | nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, |
| 2962 | char *traddr, size_t traddr_size) |
| 2963 | { |
| 2964 | struct nvmet_sq *sq = ctrl->sqs[0]; |
| 2965 | struct nvmet_fc_tgt_queue *queue = |
| 2966 | container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); |
| 2967 | struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; |
| 2968 | struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL; |
| 2969 | u64 wwnn, wwpn; |
| 2970 | ssize_t ret = 0; |
| 2971 | |
| 2972 | if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) |
| 2973 | return -ENODEV; |
| 2974 | if (!hostport || !nvmet_fc_hostport_get(hostport)) { |
| 2975 | ret = -ENODEV; |
| 2976 | goto out_put; |
| 2977 | } |
| 2978 | |
| 2979 | if (tgtport->ops->host_traddr) { |
| 2980 | ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); |
| 2981 | if (ret) |
| 2982 | goto out_put_host; |
| 2983 | ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn);
| 2984 | } |
| 2985 | out_put_host: |
| 2986 | nvmet_fc_hostport_put(hostport); |
| 2987 | out_put: |
| 2988 | nvmet_fc_tgtport_put(tgtport); |
| 2989 | return ret; |
| 2990 | } |
| 2991 | |
| 2992 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { |
| 2993 | .owner = THIS_MODULE, |
| 2994 | .type = NVMF_TRTYPE_FC, |
| 2995 | .msdbd = 1, |
| 2996 | .add_port = nvmet_fc_add_port, |
| 2997 | .remove_port = nvmet_fc_remove_port, |
| 2998 | .queue_response = nvmet_fc_fcp_nvme_cmd_done, |
| 2999 | .delete_ctrl = nvmet_fc_delete_ctrl, |
| 3000 | .discovery_chg = nvmet_fc_discovery_chg, |
| 3001 | .host_traddr = nvmet_fc_host_traddr, |
| 3002 | }; |
| 3003 | |
| 3004 | static int __init nvmet_fc_init_module(void) |
| 3005 | { |
| 3006 | return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
| 3007 | } |
| 3008 | |
| 3009 | static void __exit nvmet_fc_exit_module(void) |
| 3010 | { |
| 3011 | /* ensure any shutdown operations, e.g. delete ctrls, have finished */
| 3012 | flush_workqueue(nvmet_wq); |
| 3013 | |
| 3014 | /* sanity check - all lports should be removed */ |
| 3015 | if (!list_empty(&nvmet_fc_target_list))
| 3016 | pr_warn("%s: targetport list not empty\n", __func__);
| 3017 | |
| 3018 | nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
| 3019 | |
| 3020 | ida_destroy(&nvmet_fc_tgtport_cnt);
| 3021 | } |
| 3022 | |
| 3023 | module_init(nvmet_fc_init_module); |
| 3024 | module_exit(nvmet_fc_exit_module); |
| 3025 | |
| 3026 | MODULE_DESCRIPTION("NVMe target FC transport driver" ); |
| 3027 | MODULE_LICENSE("GPL v2" ); |
| 3028 | |