/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128

/*
 * Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
 * The 16 bit shift is to set IATTR bit to 1, which means offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
/*
 * Per-namespace state: the backing block device plus the configfs
 * directories that represent this namespace to the admin.
 */
struct nvmet_ns {
	struct list_head	dev_link;	/* entry on a subsystem list */
	struct percpu_ref	ref;		/* per-I/O reference count */
	struct block_device	*bdev;		/* backing device */
	u32			nsid;		/* namespace identifier */
	u32			blksize_shift;	/* log2 of the logical block size */
	loff_t			size;		/* capacity in bytes */
	u8			nguid[16];	/* namespace globally unique identifier */
	bool			enabled;
	struct nvmet_subsys	*subsys;	/* owning subsystem */
	const char		*device_path;	/* path to the backing device */

	struct config_group	device_group;
	struct config_group	group;

	/* completed when the last reference on @ref is dropped */
	struct completion	disable_done;
};
/* Map a configfs item back to its containing nvmet_ns (via @group). */
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}
/* Completion queue: identifier and depth only; entries live in the transport. */
struct nvmet_cq {
	u16			qid;
	u16			size;
};
/* Submission queue, reference-counted per outstanding command. */
struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;	/* owning controller */
	struct percpu_ref	ref;	/* held per in-flight request */
	u16			qid;
	u16			size;
	/* completed when the last reference on @ref is dropped */
	struct completion	free_done;
};
/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		List head for holding a list of these elements.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @subsys_group:	ConfigFS group holding the subsystem links below.
 * @subsystems:		List of subsystems exported through this port.
 * @referrals_group:	ConfigFS group holding the referral links below.
 * @referrals:		List of referral ports for discovery responses.
 * @priv:		Private data for the transport.
 * @enabled:		True once the port has been brought up.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	void				*priv;
	bool				enabled;
};
/* Map a configfs item back to its containing nvmet_port (via @group). */
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}
/*
 * Controller instance: one per host association with a subsystem.
 * Holds the register shadows (CAP/CC/CSTS) and the queue arrays.
 */
struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_cq		**cqs;
	struct nvmet_sq		**sqs;

	struct mutex		lock;
	u64			cap;	/* Controller Capabilities register */
	u64			serial;
	u32			cc;	/* Controller Configuration register */
	u32			csts;	/* Controller Status register */

	u16			cntlid;	/* controller ID within the subsystem */
	u32			kato;	/* keep-alive timeout */

	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;	/* queued nvmet_async_event entries */
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;	/* entry on subsys->ctrls */
	struct kref		ref;
	struct delayed_work	ka_work;	/* keep-alive timer work */
	struct work_struct	fatal_err_work;

	struct nvmet_fabrics_ops *ops;	/* transport that owns this controller */

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];
};
/*
 * Exported subsystem: namespaces, attached controllers, allowed hosts,
 * and the configfs directories representing them.
 */
struct nvmet_subsys {
	enum nvme_subsys_type	type;	/* e.g. NVMe or discovery subsystem */

	struct mutex		lock;
	struct kref		ref;

	struct list_head	namespaces;	/* of struct nvmet_ns */
	unsigned int		max_nsid;	/* highest nsid currently present */

	struct list_head	ctrls;		/* of struct nvmet_ctrl */
	struct ida		cntlid_ida;	/* allocator for controller IDs */

	struct list_head	hosts;		/* of struct nvmet_host_link */
	bool			allow_any_host;	/* skip host NQN access checks */

	u16			max_qid;

	u64			ver;		/* advertised NVMe version */
	char			*subsysnqn;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};
/* Map a configfs item back to its containing nvmet_subsys (via @group). */
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}
/*
 * Map the "namespaces" configfs subdirectory item back to its owning
 * nvmet_subsys (via @namespaces_group rather than @group).
 */
static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}
/* A host NQN entry; the NQN itself is the configfs directory name. */
struct nvmet_host {
	struct config_group	group;
};
/* Map a configfs item back to its containing nvmet_host (via @group). */
static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}
/* The host NQN is stored as the configfs item name, not a struct field. */
static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}
/* Links an allowed host onto a subsystem's @hosts list. */
struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};
/* Links an exported subsystem onto a port's @subsystems list. */
struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};
struct nvmet_req;

/*
 * Operations a fabrics transport registers with the target core via
 * nvmet_register_transport().
 */
struct nvmet_fabrics_ops {
	struct module		*owner;
	unsigned int		type;			/* transport type */
	unsigned int		sqe_inline_size;	/* bytes of in-capsule data */
	unsigned int		msdbd;			/* max SGL descriptors per command */
	bool			has_keyed_sgls : 1;	/* transport uses keyed SGLs */
	void (*queue_response)(struct nvmet_req *req);	/* send the completion */
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC	8

/*
 * Per-command context, valid from nvmet_req_init() until
 * nvmet_req_complete().
 */
struct nvmet_req {
	struct nvme_command	*cmd;	/* submission queue entry */
	struct nvme_completion	*rsp;	/* completion entry to be filled in */
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;	/* resolved target namespace, if any */
	struct scatterlist	*sg;	/* data buffer */
	/* inline bio/bvec avoid an allocation for small transfers */
	struct bio		inline_bio;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	int			sg_cnt;
	size_t			data_len;	/* expected transfer length */

	struct nvmet_port	*port;

	/* handler invoked to run the command after parsing */
	void (*execute)(struct nvmet_req *req);
	struct nvmet_fabrics_ops *ops;
};
/*
 * Store @status into the completion entry.  The left shift places the
 * status code above bit 0 of the CQE status field (bit 0 is the phase
 * tag in the NVMe completion format).
 */
static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
	req->rsp->status = cpu_to_le16(status << 1);
}
/* Store the command-specific result dword into the completion entry. */
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->rsp->result = cpu_to_le32(result);
}
  196. /*
  197. * NVMe command writes actually are DMA reads for us on the target side.
  198. */
  199. static inline enum dma_data_direction
  200. nvmet_data_dir(struct nvmet_req *req)
  201. {
  202. return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  203. }
/* A queued asynchronous event, linked on nvmet_ctrl->async_events. */
struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;	/* associated log page identifier */
};
/* Command parsing, one entry per command set. */
int nvmet_parse_connect_cmd(struct nvmet_req *req);
int nvmet_parse_io_cmd(struct nvmet_req *req);
int nvmet_parse_admin_cmd(struct nvmet_req *req);
int nvmet_parse_discovery_cmd(struct nvmet_req *req);
int nvmet_parse_fabrics_cmd(struct nvmet_req *req);

/* Request and queue lifecycle. */
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
void nvmet_req_complete(struct nvmet_req *req, u16 status);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

/* Controller lifetime and register handling. */
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);

/* Subsystem and namespace management. */
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

/* Transport and port registration. */
int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *port);

/* Copy between a request's SGL and a kernel buffer; return NVMe status. */
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		64
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
/* keep-alive granularity; units per NVMe spec (KAS field) — confirm */
#define NVMET_KAS		10
/* discovery controller keep-alive timeout, in seconds */
#define NVMET_DISC_KATO		120

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;	/* discovery log generation counter */
extern struct rw_semaphore nvmet_config_sem;	/* protects configfs topology */

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn);

#endif /* _NVMET_H */