admin-cmd.c

/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

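/*
 * Worked example (illustrative only): a host requesting a full 4KB log page
 * sets the 0's based dword count NUMD = 0x3FF, split as NUMDU = 0x0000 and
 * NUMDL = 0x03FF, so
 *
 *	len = ((0x0000 << 16) + 0x03FF + 1) * sizeof(u32) = 4096 bytes
 *
 * which matches the 4KB buffer sizes used elsewhere in this file.
 */
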
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	status = NVME_SC_SUCCESS;
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS;
		pr_err("nvmet: could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		goto out;
	}

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
	nvmet_put_namespace(ns);
out:
	return status;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	status = NVME_SC_SUCCESS;
	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return status;
}

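/*
 * Note on the two helpers above (observation, no behaviour change): the
 * SMART counters in struct nvme_smart_log are 128-bit little-endian fields,
 * so the put_unaligned_le64() calls only fill the low 64 bits at index [0];
 * the upper half stays zero because the caller hands in a kzalloc()ed buffer.
 */
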
static u16 nvmet_get_smart_log(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u16 status;

	WARN_ON(req == NULL || slog == NULL);
	/* NSID 0xffffffff is the broadcast value: report across all namespaces */
	if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
		status = nvmet_get_smart_log_all(req, slog);
	else
		status = nvmet_get_smart_log_nsid(req, slog);

	return status;
}

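/*
 * The log page identifiers dispatched below are the pages an NVMe 1.2
 * controller must implement: 01h Error Information, 02h SMART / Health
 * Information and 03h Firmware Slot Information.
 */
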
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	struct nvme_smart_log *smart_log;
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	void *buf;
	u16 status = 0;

	buf = kzalloc(data_len, GFP_KERNEL);
	if (!buf) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.lid) {
	case 0x01:
		/*
		 * We currently never set the More bit in the status field,
		 * so all error log entries are invalid and can be zeroed out.
		 * This is called a minimum viable implementation (TM) of this
		 * mandatory log page.
		 */
		break;
	case 0x02:
		/*
		 * XXX: fill out actual smart log
		 *
		 * We might have a hard time coming up with useful values for
		 * many of the fields, and even when we have useful data
		 * available (e.g. units or commands read/written) those aren't
		 * persistent over power loss.
		 */
		if (data_len != sizeof(*smart_log)) {
			status = NVME_SC_INTERNAL;
			goto err;
		}
		smart_log = buf;
		status = nvmet_get_smart_log(req, smart_log);
		if (status) {
			memset(buf, '\0', data_len);
			goto err;
		}
		break;
	case 0x03:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		break;
	default:
		BUG();
	}

	status = nvmet_copy_to_sgl(req, 0, buf, data_len);

err:
	kfree(buf);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);

	memset(id->mn, ' ', sizeof(id->mn));
	strncpy((char *)id->mn, "Linux", sizeof(id->mn));

	memset(id->fr, ' ', sizeof(id->fr));
	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports and multiple hosts: */
	id->cmic = (1 << 0) | (1 << 1);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(1 << 8);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (ctrl->ops->sqe_inline_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  ctrl->ops->sqe_inline_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

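/*
 * Decoding notes for a few of the values set above (illustrative only):
 * sqes = 0x66 and cqes = 0x44 encode the minimum and maximum entry sizes as
 * powers of two, i.e. 64-byte submission and 16-byte completion queue
 * entries; ioccsz and iorcsz are expressed in 16-byte units as required for
 * NVMe over Fabrics capsules, so ioccsz covers one 64-byte SQE plus
 * sqe_inline_size bytes of in-capsule data.
 */
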
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nuse = id->nsze =
		cpu_to_le64(ns->size >> ns->blksize_shift);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);

	memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));

	id->lbaf[0].ds = ns->blksize_shift;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = 4096;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

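/*
 * Host-side usage sketch (illustrative, not part of this target code): the
 * active namespace list is paged by re-issuing Identify with CNS 02h and
 * NSID set to the last entry of the previous 4KB page; the loop above only
 * reports NSIDs strictly greater than that value, up to 1024 per response.
 */
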
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately, telling the host that the command
 * to abort wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	/* result bit 0 set: the specified command was not aborted */
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u64 val;
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xf) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		val = le64_to_cpu(req->cmd->prop_set.value);
		val32 = val & 0xffff;
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

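/*
 * Note on the Number of Queues result above (illustrative): completion
 * dword 0 carries the number of submission queues allocated in bits 15:0
 * and the number of completion queues in bits 31:16, both as 0's based
 * values, hence the (max_qid - 1) encoding used here and in Get Features.
 */
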
static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xf) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
	case NVME_FEAT_ASYNC_EVENT:
		break;
#endif
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

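/*
 * Unit handling for the keep-alive timeout feature (as implemented above):
 * Set Features receives the value in milliseconds and stores it in seconds
 * via DIV_ROUND_UP(val32, 1000), while Get Features converts it back to
 * milliseconds with kato * 1000, so a host reading the feature back may see
 * the value rounded up to the next full second.
 */
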
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

int nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	req->ns = NULL;

	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("nvmet: got admin cmd %d while CC.EN == 0\n",
				cmd->common.opcode);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("nvmet: got admin cmd %d while CSTS.RDY == 0\n",
				cmd->common.opcode);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case 0x01:
		case 0x02:
		case 0x03:
			req->execute = nvmet_execute_get_log_page;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = 4096;
		switch (le32_to_cpu(cmd->identify.cns)) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}