/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
#include <linux/kfifo.h>

#define NVMET_DEFAULT_VS		NVME_VS(2, 1, 0)

#define NVMET_NS_ENABLED		XA_MARK_1
#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8
#define NVMET_PR_LOG_QUEUE_SIZE		64

#define nvmet_for_each_ns(xa, index, entry) \
	xa_for_each(xa, index, entry)

#define nvmet_for_each_enabled_ns(xa, index, entry) \
	xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
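
/*
 * Illustrative sketch (not part of this header's API): walking the
 * namespaces of a subsystem. "subsys" and do_something() are hypothetical
 * stand-ins; the xarray and the NVMET_NS_ENABLED mark come from the
 * definitions above.
 *
 *	struct nvmet_ns *ns;
 *	unsigned long idx;
 *
 *	nvmet_for_each_ns(&subsys->namespaces, idx, ns)
 *		do_something(ns);		// all namespaces
 *
 *	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns)
 *		do_something(ns);		// only enabled ones
 */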

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe status is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, indicating that the offending
 * offset lies in the data portion of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
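
/*
 * Illustrative sketch: a Connect handler rejecting a bad hostnqn might
 * report the offending field like this (hypothetical snippet, error-path
 * plumbing trimmed):
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 */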

struct nvmet_pr_registrant {
	u64			rkey;
	uuid_t			hostid;
	enum nvme_pr_type	rtype;
	struct list_head	entry;
	struct rcu_head		rcu;
};

struct nvmet_pr {
	bool			enable;
	unsigned long		notify_mask;
	atomic_t		generation;
	struct nvmet_pr_registrant __rcu *holder;
	/*
	 * Reservation commands need mutual exclusion for their whole
	 * execution. Because "preempt and abort" waits asynchronously for
	 * the per-controller percpu_refs to drop before it completes, that
	 * exclusion must be provided by a semaphore rather than a mutex.
	 */
	struct semaphore	pr_sem;
	struct list_head	registrant_list;
};

struct nvmet_pr_per_ctrl_ref {
	struct percpu_ref	ref;
	struct completion	free_done;
	struct completion	confirm_done;
	uuid_t			hostid;
};

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
	struct nvmet_pr		pr;
	struct xarray		pr_per_ctrl_refs;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	struct nvmet_ctrl	*ctrl;
	u16			qid;
	u16			size;
	refcount_t		ref;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	struct nvmet_cq		*cq;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u8			sc_c;
	u8			dhchap_status;
	u8			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	struct key		*tls_key;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	int				max_queue_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_pr_log_mgr {
	struct mutex		lock;
	u64			lost_count;
	u64			counter;
	DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
};

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;
	struct nvmet_cq		**cqs;

	void			*drvdata;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
	bool			concat;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	struct key		*tls_key;
#endif
	struct nvmet_pr_log_mgr	pr_log_mgr;
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	u16			vendor_id;
	u16			subsys_vendor_id;
	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);

	/* Operations mandatory for PCI target controllers */
	u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 cqid, u16 flags,
			 u16 qsize, u64 prp1);
	u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
	u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
			 u16 qsize, u64 prp1, u16 irq_vector);
	u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid);
	u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
			   void *feat_data);
	u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
			   void *feat_data);
};
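
/*
 * Illustrative sketch: the minimal shape of a transport registration.
 * "nvmet_my_ops" and its callbacks are hypothetical stand-ins; see
 * nvmet_register_transport()/nvmet_unregister_transport() below.
 *
 *	static const struct nvmet_fabrics_ops nvmet_my_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		.add_port	= nvmet_my_add_port,
 *		.remove_port	= nvmet_my_remove_port,
 *		.queue_response	= nvmet_my_queue_response,
 *		.delete_ctrl	= nvmet_my_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&nvmet_my_ops);
 */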

#define NVMET_MAX_INLINE_BIOVEC		8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool		mpool_alloc;
			struct kiocb	iocb;
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
		struct {
			struct bio	inline_bio;
			struct request	*rq;
			struct work_struct work;
			bool		use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio	inline_bio;
			struct work_struct zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
		struct {
			struct work_struct abort_work;
		} r;
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
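
/*
 * Illustrative sketch: a transport mapping the request's scatterlist for
 * DMA ("dev" is a hypothetical struct device owned by the transport):
 *
 *	int count = dma_map_sg(dev, req->sg, req->sg_cnt,
 *			       nvmet_data_dir(req));
 *	if (!count)
 *		return NVME_SC_INTERNAL;
 */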

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
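
/*
 * Illustrative sketch of the masking protocol: an AEN for bit "bn" is sent
 * at most once until the host re-arms it by reading the corresponding log
 * page with the Retain Asynchronous Event (RAE) bit clear.
 *
 *	if (!nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 *				      NVME_AER_NOTICE_NS_CHANGED,
 *				      NVME_LOG_CHANGED_NS);
 *
 * and later, in the Get Log Page handler for the changed-namespace list:
 *
 *	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
 */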

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
		const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
size_t nvmet_req_transfer_len(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);
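
/*
 * Illustrative sketch of a transport's per-command flow (error handling
 * trimmed; "queue" and my_transport_ops are hypothetical):
 *
 *	if (!nvmet_req_init(req, &queue->nvme_sq, &my_transport_ops))
 *		return;		// error response already queued
 *	if (nvmet_req_alloc_sgls(req) < 0)
 *		goto fail;
 *	// ... transfer data from the wire into req->sg ...
 *	req->execute(req);	// completes via nvmet_req_complete()
 */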

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
void nvmet_cq_init(struct nvmet_cq *cq);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_cq_destroy(struct nvmet_cq *cq);
bool nvmet_cq_get(struct nvmet_cq *cq);
void nvmet_cq_put(struct nvmet_cq *cq);
bool nvmet_cq_in_use(struct nvmet_cq *cq);
u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		struct nvmet_cq *cq, u16 qid, u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);

struct nvmet_alloc_ctrl_args {
	struct nvmet_port	*port;
	struct nvmet_sq		*sq;
	char			*subsysnqn;
	char			*hostnqn;
	uuid_t			*hostid;
	const struct nvmet_fabrics_ops *ops;
	struct device		*p2p_client;
	u32			kato;
	__le32			result;
	u16			error_loc;
	u16			status;
};

struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES(ctrl->cap) + 1)

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
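
/*
 * Worked example: rw.length is 0's based, so rw.length == 7 means 8 logical
 * blocks; with 512-byte blocks (blksize_shift == 9) that is
 * (7 + 1) << 9 == 4096 bytes.
 */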

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
{
	return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT;
}
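
/*
 * Worked example: CC.MPS encodes the host memory page size as a power of
 * two with a 4 KiB floor, so the byte size is
 *
 *	page_size = 1 << (12 + nvmet_cc_mps(cc));
 *
 * e.g. nvmet_cc_mps(cc) == 0 gives 4096 bytes.
 */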

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(clamp(a, 1U, 1U << 16) - 1);
}
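
/*
 * Worked example: to0based(1) == 0 and to0based(65536) == 0xffff;
 * out-of-range inputs are clamped first, so to0based(0) == 0 and
 * to0based(100000) == 0xffff.
 */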

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
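
/*
 * Worked example: for a 4096-byte namespace block size (blksize_shift == 12)
 * and 512-byte kernel sectors (SECTOR_SHIFT == 9), the shift is 3, i.e.
 * 8 sectors per LBA: sector 64 maps to LBA 8 and back.
 */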

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
	else
		bio_uninit(bio);
}
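
/*
 * Illustrative sketch of the matching allocation side (as in the bdev I/O
 * path; "opf" is a hypothetical blk_opf_t): small requests reuse the
 * embedded bio, larger ones allocate one.
 *
 *	if (nvmet_use_inline_bvec(req)) {
 *		bio = &req->b.inline_bio;
 *		bio_init(bio, req->ns->bdev, req->inline_bvec,
 *			 ARRAY_SIZE(req->inline_bvec), opf);
 *	} else {
 *		bio = bio_alloc(req->ns->bdev, bio_max_segs(req->sg_cnt),
 *				opf, GFP_KERNEL);
 *	}
 *	// ... submit; the completion path calls nvmet_req_bio_put(req, bio)
 */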

#ifdef CONFIG_NVME_TARGET_TCP_TLS
static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq)
{
	return sq->tls_key ? key_serial(sq->tls_key) : 0;
}
static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq)
{
	if (sq->tls_key) {
		key_put(sq->tls_key);
		sq->tls_key = NULL;
	}
}
#else
static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq) { return 0; }
static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) {}
#endif
#ifdef CONFIG_NVME_TARGET_AUTH
u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
u32 nvmet_auth_receive_data_len(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
	return ctrl->host_key != NULL && !nvmet_queue_tls_keyid(sq);
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
void nvmet_auth_insert_psk(struct nvmet_sq *sq);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl,
				  struct nvmet_sq *sq)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl,
				  struct nvmet_sq *sq)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
static inline void nvmet_auth_insert_psk(struct nvmet_sq *sq) {}
#endif

int nvmet_pr_init_ns(struct nvmet_ns *ns);
u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
void nvmet_pr_exit_ns(struct nvmet_ns *ns);
void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
	percpu_ref_put(&pc_ref->ref);
}
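
/*
 * Illustrative sketch of the get/put pairing in an I/O path (hypothetical
 * handler):
 *
 *	status = nvmet_pr_get_ns_pc_ref(req);	// takes req->pc_ref
 *	if (status)
 *		return status;
 *	// ... perform the reservation-protected access ...
 *	nvmet_pr_put_ns_pc_ref(req->pc_ref);
 */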

/*
 * Data for the get_feature() and set_feature() operations of PCI target
 * controllers.
 */
struct nvmet_feat_irq_coalesce {
	u8		thr;
	u8		time;
};

struct nvmet_feat_irq_config {
	u16		iv;
	bool		cd;
};

struct nvmet_feat_arbitration {
	u8		hpw;
	u8		mpw;
	u8		lpw;
	u8		ab;
};

#endif /* _NVMET_H */