/*
 * Keystone crypto accelerator driver
 *
 * Copyright (C) 2015, 2016 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors:	Sandeep Nair
 *		Vitaly Andrianov
 *
 * Contributors:	Tinku Mannan
 *			Hao Zhang
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/soc/ti/knav_dma.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_helpers.h>
#include <crypto/des.h>

#include "keystone-sa.h"
#include "keystone-sa-hlp.h"

#define SA_ATTR(_name, _mode, _show, _store) \
	struct sa_kobj_attribute sa_attr_##_name = \
	__ATTR(_name, _mode, _show, _store)

#define to_sa_kobj_attr(_attr) \
	container_of(_attr, struct sa_kobj_attribute, attr)

#define to_crypto_data_from_stats_obj(obj) \
	container_of(obj, struct keystone_crypto_data, stats_kobj)
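
/*
 * For clarity, SA_ATTR(foo, S_IRUGO, show_fn, store_fn) expands to:
 *
 *	struct sa_kobj_attribute sa_attr_foo =
 *		__ATTR(foo, S_IRUGO, show_fn, store_fn)
 *
 * i.e. a sa_kobj_attribute whose embedded struct attribute carries the
 * sysfs file name and mode. The two container_of() helpers above recover
 * the wrapper attribute and the owning keystone_crypto_data from the
 * generic pointers that sysfs hands back.
 */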

struct device *sa_ks2_dev;

/**
 * sa_allocate_rx_buf() - Allocate ONE receive buffer for Rx descriptors
 * @dev_data: struct keystone_crypto_data pointer
 * @fdq: fdq index.
 *
 * This function allocates an rx buffer and pushes it to the free descriptor
 * queue (fdq).
 *
 * An RX channel may have up to 4 free descriptor queues (fdq 0-3). Each
 * queue keeps buffers of one particular size.
 * The SA crypto driver allocates 1500-byte buffers for the first queue;
 * all other queues use one-page buffers.
 * A hardware descriptor is taken from the rx_pool, filled with the buffer's
 * address and size, and pushed to the rx_fdq matching the fdq index.
 *
 * Return: -ENOMEM in case of error, 0 otherwise.
 */
static int sa_allocate_rx_buf(struct keystone_crypto_data *dev_data,
			      int fdq)
{
	struct device *dev = &dev_data->pdev->dev;
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	void *bufptr;
	struct page *page;
	dma_addr_t dma;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(dev_data->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (fdq == 0) {
		buf_len = SA_RX_BUF0_SIZE;
		bufptr = kmalloc(buf_len, GFP_ATOMIC | GFP_DMA | __GFP_COLD);
		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(dev, "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(dev, bufptr, buf_len, DMA_TO_DEVICE);
		sw_data[0] = (u32)bufptr;
		sw_data[1] = 0;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
		if (unlikely(!page)) {
			dev_warn_ratelimited(dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(dev, page, 0, buf_len, DMA_TO_DEVICE);
		sw_data[0] = (u32)page_address(page);
		sw_data[1] = (u32)page;
		atomic_inc(&dev_data->rx_dma_page_cnt);
	}

	desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info = KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (dev_data->rx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	hwdesc->orig_buff = dma;
	hwdesc->orig_len = buf_len;
	hwdesc->sw_data[0] = sw_data[0];
	hwdesc->sw_data[1] = sw_data[1];
	hwdesc->desc_info = desc_info;
	hwdesc->packet_info = pkt_info;

	/* Push to FDQs */
	knav_pool_desc_map(dev_data->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(dev_data->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(dev_data->rx_pool, hwdesc);
	return -ENOMEM;
}
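
/*
 * Note on the sw_data[] bookkeeping above: for fdq 0 the descriptor stores
 * the kmalloc'ed virtual address in sw_data[0] (sw_data[1] is zero), while
 * the page-sized queues store the page's virtual address in sw_data[0] and
 * the struct page pointer in sw_data[1]. The free path relies on this to
 * pair dma_unmap_single()/kfree() or dma_unmap_page()/__free_page() with
 * the matching allocation.
 */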

/* Refill Rx FDQ with descriptors & attached buffers */
static int sa_rxpool_refill(struct keystone_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;
	u32 fdq_deficit;
	int i;
	int ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && dev_data->rx_fdq[i] && !ret;
	     i++) {
		fdq_deficit = dev_data->rx_queue_depths[i] -
			      knav_queue_get_count(dev_data->rx_fdq[i]);
		while (fdq_deficit--) {
			ret = sa_allocate_rx_buf(dev_data, i);
			if (ret) {
				dev_err(dev, "cannot allocate rx_buffer\n");
				break;
			}
		}
	} /* end for fdqs */
	return ret;
}

/* Release ALL descriptors and attached buffers from Rx FDQ */
static int sa_free_rx_buf(struct keystone_crypto_data *dev_data,
			  int fdq)
{
	struct device *dev = &dev_data->pdev->dev;
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	while ((dma = knav_queue_pop(dev_data->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(dev_data->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(dev, "failed to unmap Rx desc\n");
			return -EIO;
		}
		dma = desc->orig_buff;
		buf_len = desc->orig_len;
		buf_ptr = (void *)desc->sw_data[0];
		if (unlikely(!dma)) {
			dev_err(dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(dev_data->rx_pool, desc);
			return -EIO;
		}
		if (unlikely(!buf_ptr)) {
			dev_err(dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(dev_data->rx_pool, desc);
			return -EIO;
		}
		if (fdq == 0) {
			dma_unmap_single(dev, dma, buf_len, DMA_FROM_DEVICE);
			kfree(buf_ptr);
		} else {
			dma_unmap_page(dev, dma, buf_len, DMA_FROM_DEVICE);
			/* sw_data[1] holds the struct page pointer saved at
			 * allocation time; sw_data[0] is only the virtual
			 * address and must not be passed to __free_page().
			 */
			__free_page((struct page *)desc->sw_data[1]);
		}
		knav_pool_desc_put(dev_data->rx_pool, desc);
	}
	return 0;
}

static int sa_rxpool_free(struct keystone_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;
	int i;
	int ret = 0;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && dev_data->rx_fdq[i] != NULL;
	     i++) {
		ret = sa_free_rx_buf(dev_data, i);
		WARN_ON(ret);
		if (ret)
			return ret;
	}

	if (knav_pool_count(dev_data->rx_pool) != dev_data->rx_pool_size) {
		dev_err(dev, "Lost Rx (%d) descriptors %d/%d\n",
			dev_data->rx_pool_size -
			knav_pool_count(dev_data->rx_pool),
			dev_data->rx_pool_size,
			knav_pool_count(dev_data->rx_pool));
		return -EIO;
	}

	knav_pool_destroy(dev_data->rx_pool);
	dev_data->rx_pool = NULL;
	return ret;
}

/* DMA channel rx notify callback */
static void sa_dma_notify_rx_compl(void *arg)
{
	struct keystone_crypto_data *dev_data = arg;

	knav_queue_disable_notify(dev_data->rx_compl_q);
	tasklet_schedule(&dev_data->rx_task);
}

/* Rx tasklet code */
static void sa_rx_task(unsigned long data)
{
	struct keystone_crypto_data *dev_data =
		(struct keystone_crypto_data *)data;

	sa_rx_completion_process(dev_data);
	sa_rxpool_refill(dev_data);
	knav_queue_enable_notify(dev_data->rx_compl_q);
}
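
/*
 * Both completion paths follow the same interrupt-mitigation pattern:
 * the queue notification callback masks further notifications and
 * schedules a tasklet; the tasklet drains the completion queue (and,
 * for Rx, refills the FDQs) before re-enabling notifications. This
 * keeps the notification rate bounded, much like NAPI polling does
 * for network drivers.
 */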

/* DMA channel tx notify callback */
static void sa_dma_notify_tx_compl(void *arg)
{
	struct keystone_crypto_data *dev_data = arg;

	knav_queue_disable_notify(dev_data->tx_compl_q);
	tasklet_schedule(&dev_data->tx_task);
}

/* Tx tasklet code */
static void sa_tx_task(unsigned long data)
{
	struct keystone_crypto_data *dev_data =
		(struct keystone_crypto_data *)data;

	sa_tx_completion_process(dev_data);
	knav_queue_enable_notify(dev_data->tx_compl_q);
}

static int sa_free_resources(struct keystone_crypto_data *dev_data)
{
	int i;
	int ret = 0;

	if (!IS_ERR_OR_NULL(dev_data->tx_chan)) {
		knav_dma_close_channel(dev_data->tx_chan);
		dev_data->tx_chan = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->rx_chan)) {
		knav_dma_close_channel(dev_data->rx_chan);
		dev_data->rx_chan = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->tx_submit_q)) {
		knav_queue_close(dev_data->tx_submit_q);
		dev_data->tx_submit_q = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->tx_compl_q)) {
		knav_queue_close(dev_data->tx_compl_q);
		dev_data->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->tx_pool)) {
		knav_pool_destroy(dev_data->tx_pool);
		dev_data->tx_pool = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->rx_compl_q)) {
		knav_queue_close(dev_data->rx_compl_q);
		dev_data->rx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(dev_data->rx_pool))
		ret = sa_rxpool_free(dev_data);

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && dev_data->rx_fdq[i] != NULL;
	     i++) {
		knav_queue_close(dev_data->rx_fdq[i]);
		dev_data->rx_fdq[i] = NULL;
	}
	return ret;
}

static int sa_setup_resources(struct keystone_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;
	char name[20];
	int ret = 0;
	int i;

	snprintf(name, sizeof(name), "rx-pool-%s", dev_name(dev));
	dev_data->rx_pool = knav_pool_create(name, dev_data->rx_pool_size,
					     dev_data->rx_pool_region_id);
	if (IS_ERR_OR_NULL(dev_data->rx_pool)) {
		dev_err(dev, "Couldn't create rx pool\n");
		/* PTR_ERR(NULL) is 0, so fall back to -ENOMEM */
		return PTR_ERR(dev_data->rx_pool) ?: -ENOMEM;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", dev_name(dev));
	dev_data->tx_pool = knav_pool_create(name, dev_data->tx_pool_size,
					     dev_data->tx_pool_region_id);
	if (IS_ERR_OR_NULL(dev_data->tx_pool)) {
		dev_err(dev, "Couldn't create tx pool\n");
		return PTR_ERR(dev_data->tx_pool) ?: -ENOMEM;
	}

	snprintf(name, sizeof(name), "tx-subm-q-%s", dev_name(dev));
	dev_data->tx_submit_q = knav_queue_open(name,
						dev_data->tx_submit_qid,
						KNAV_QUEUE_SHARED);
	if (IS_ERR(dev_data->tx_submit_q)) {
		ret = PTR_ERR(dev_data->tx_submit_q);
		dev_err(dev, "Could not open \"%s\": %d\n", name, ret);
		return ret;
	}

	snprintf(name, sizeof(name), "tx-compl-q-%s", dev_name(dev));
	dev_data->tx_compl_q = knav_queue_open(name, dev_data->tx_compl_qid, 0);
	if (IS_ERR(dev_data->tx_compl_q)) {
		ret = PTR_ERR(dev_data->tx_compl_q);
		dev_err(dev, "Could not open \"%s\": %d\n", name, ret);
		return ret;
	}

	snprintf(name, sizeof(name), "rx-compl-q-%s", dev_name(dev));
	dev_data->rx_compl_q = knav_queue_open(name, dev_data->rx_compl_qid, 0);
	if (IS_ERR(dev_data->rx_compl_q)) {
		ret = PTR_ERR(dev_data->rx_compl_q);
		dev_err(dev, "Could not open \"%s\": %d\n", name, ret);
		return ret;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && dev_data->rx_queue_depths[i];
	     i++) {
		snprintf(name, sizeof(name), "rx-fdq%d-%s", i, dev_name(dev));
		dev_data->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR_OR_NULL(dev_data->rx_fdq[i]))
			return PTR_ERR(dev_data->rx_fdq[i]) ?: -ENOMEM;
	}

	ret = sa_rxpool_refill(dev_data);
	return ret;
}

static int sa_setup_dma(struct keystone_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	int error = 0;
	int i;
	u32 last_fdq = 0;

	error = sa_setup_resources(dev_data);
	if (error)
		goto fail;

	/* Setup Tx DMA channel */
	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	dev_data->tx_chan = knav_dma_open_channel(dev, dev_data->tx_chan_name,
						  &config);
	if (IS_ERR_OR_NULL(dev_data->tx_chan)) {
		dev_err(dev, "(%s) failed to open dmachan\n",
			dev_data->tx_chan_name);
		error = -ENODEV;
		goto fail;
	}

	notify_cfg.fn = sa_dma_notify_tx_compl;
	notify_cfg.fn_arg = dev_data;
	error = knav_queue_device_control(dev_data->tx_compl_q,
					  KNAV_QUEUE_SET_NOTIFIER,
					  (unsigned long)&notify_cfg);
	if (error)
		goto fail;

	knav_queue_enable_notify(dev_data->tx_compl_q);
	dev_dbg(dev, "opened tx channel %s\n", dev_data->tx_chan_name);

	/* Set notification for Rx completion */
	notify_cfg.fn = sa_dma_notify_rx_compl;
	notify_cfg.fn_arg = dev_data;
	error = knav_queue_device_control(dev_data->rx_compl_q,
					  KNAV_QUEUE_SET_NOTIFIER,
					  (unsigned long)&notify_cfg);
	if (error)
		goto fail;

	knav_queue_disable_notify(dev_data->rx_compl_q);

	/* Setup Rx DMA channel */
	memset(&config, 0, sizeof(config));
	config.direction = DMA_DEV_TO_MEM;
	config.u.rx.einfo_present = true;
	config.u.rx.psinfo_present = true;
	config.u.rx.err_mode = DMA_RETRY;
	config.u.rx.desc_type = DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop = false;
	config.u.rx.sop_offset = 0; /* NETCP_SOP_OFFSET */
	config.u.rx.dst_q = dev_data->rx_compl_qid;
	config.u.rx.thresh = DMA_THRESH_NONE;
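
	/*
	 * Populate all KNAV_DMA_FDQ_PER_CHAN slots: entries beyond the
	 * queues actually opened repeat the id of the last valid FDQ
	 * rather than being left as zero.
	 */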
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++) {
		if (dev_data->rx_fdq[i])
			last_fdq = knav_queue_get_id(dev_data->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	dev_data->rx_chan = knav_dma_open_channel(dev, dev_data->rx_chan_name,
						  &config);
	if (IS_ERR_OR_NULL(dev_data->rx_chan)) {
		dev_err(dev, "(%s) failed to open dmachan\n",
			dev_data->rx_chan_name);
		error = -ENODEV;
		goto fail;
	}

	knav_queue_enable_notify(dev_data->rx_compl_q);
	return 0;

fail:
	sa_free_resources(dev_data);
	return error;
}

/* SYSFS interface functions */
struct sa_kobj_attribute {
	struct attribute attr;
	ssize_t (*show)(struct keystone_crypto_data *crypto,
			struct sa_kobj_attribute *attr, char *buf);
	ssize_t (*store)(struct keystone_crypto_data *crypto,
			 struct sa_kobj_attribute *attr, const char *, size_t);
};

static
ssize_t sa_stats_show_tx_pkts(struct keystone_crypto_data *crypto,
			      struct sa_kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 atomic_read(&crypto->stats.tx_pkts));
}

static
ssize_t sa_stats_reset_tx_pkts(struct keystone_crypto_data *crypto,
			       struct sa_kobj_attribute *attr,
			       const char *buf, size_t len)
{
	atomic_set(&crypto->stats.tx_pkts, 0);
	return len;
}

static
ssize_t sa_stats_show_rx_pkts(struct keystone_crypto_data *crypto,
			      struct sa_kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 atomic_read(&crypto->stats.rx_pkts));
}

static ssize_t sa_stats_reset_rx_pkts(struct keystone_crypto_data *crypto,
				      struct sa_kobj_attribute *attr,
				      const char *buf, size_t len)
{
	atomic_set(&crypto->stats.rx_pkts, 0);
	return len;
}

static
ssize_t sa_stats_show_tx_drop_pkts(struct keystone_crypto_data *crypto,
				   struct sa_kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 atomic_read(&crypto->stats.tx_dropped));
}

static
ssize_t sa_stats_reset_tx_drop_pkts(struct keystone_crypto_data *crypto,
				    struct sa_kobj_attribute *attr,
				    const char *buf, size_t len)
{
	atomic_set(&crypto->stats.tx_dropped, 0);
	return len;
}

static ssize_t
sa_stats_show_sc_tear_drop_pkts(struct keystone_crypto_data *crypto,
				struct sa_kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 atomic_read(&crypto->stats.sc_tear_dropped));
}

static SA_ATTR(tx_pkts, S_IRUGO | S_IWUSR,
	       sa_stats_show_tx_pkts, sa_stats_reset_tx_pkts);
static SA_ATTR(rx_pkts, S_IRUGO | S_IWUSR,
	       sa_stats_show_rx_pkts, sa_stats_reset_rx_pkts);
static SA_ATTR(tx_drop_pkts, S_IRUGO | S_IWUSR,
	       sa_stats_show_tx_drop_pkts, sa_stats_reset_tx_drop_pkts);
static SA_ATTR(sc_tear_drop_pkts, S_IRUGO,
	       sa_stats_show_sc_tear_drop_pkts, NULL);

static struct attribute *sa_stats_attrs[] = {
	&sa_attr_tx_pkts.attr,
	&sa_attr_rx_pkts.attr,
	&sa_attr_tx_drop_pkts.attr,
	&sa_attr_sc_tear_drop_pkts.attr,
	NULL
};
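
/*
 * Through the ktype below these attributes appear as files in a "stats"
 * directory under the device's sysfs node, along the lines of (the exact
 * path depends on the platform topology):
 *
 *	.../keystone-crypto/stats/tx_pkts
 *	.../keystone-crypto/stats/rx_pkts
 *	.../keystone-crypto/stats/tx_drop_pkts
 *	.../keystone-crypto/stats/sc_tear_drop_pkts
 *
 * Reading a file returns the counter; writing resets it where a store
 * hook is provided (sc_tear_drop_pkts is read-only).
 */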

static ssize_t sa_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct sa_kobj_attribute *sa_attr = to_sa_kobj_attr(attr);
	struct keystone_crypto_data *crypto =
		to_crypto_data_from_stats_obj(kobj);
	ssize_t ret = -EIO;

	if (sa_attr->show)
		ret = sa_attr->show(crypto, sa_attr, buf);
	return ret;
}

static
ssize_t sa_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t len)
{
	struct sa_kobj_attribute *sa_attr = to_sa_kobj_attr(attr);
	struct keystone_crypto_data *crypto =
		to_crypto_data_from_stats_obj(kobj);
	ssize_t ret = -EIO;

	if (sa_attr->store)
		ret = sa_attr->store(crypto, sa_attr, buf, len);
	return ret;
}

static const struct sysfs_ops sa_stats_sysfs_ops = {
	.show = sa_kobj_attr_show,
	.store = sa_kobj_attr_store,
};

static struct kobj_type sa_stats_ktype = {
	.sysfs_ops = &sa_stats_sysfs_ops,
	.default_attrs = sa_stats_attrs,
};

static int sa_create_sysfs_entries(struct keystone_crypto_data *crypto)
{
	struct device *dev = &crypto->pdev->dev;
	int ret;

	ret = kobject_init_and_add(&crypto->stats_kobj, &sa_stats_ktype,
				   kobject_get(&dev->kobj), "stats");
	if (ret) {
		dev_err(dev, "failed to create sysfs entry\n");
		kobject_put(&crypto->stats_kobj);
		kobject_put(&dev->kobj);
	}

	if (!ret)
		crypto->stats_fl = 1;

	return ret;
}

static void sa_delete_sysfs_entries(struct keystone_crypto_data *crypto)
{
	if (crypto->stats_fl)
		kobject_del(&crypto->stats_kobj);
}

static int sa_read_dtb(struct device_node *node,
		       struct keystone_crypto_data *dev_data)
{
	int i, ret = 0;
	struct device *dev = &dev_data->pdev->dev;
	u32 temp[2];

	ret = of_property_read_string(node, "ti,tx-channel",
				      &dev_data->tx_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,tx-channel\" parameter\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "ti,tx-queue-depth",
				   &dev_data->tx_queue_depth);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,tx-queue-depth\" parameter\n");
		return -EINVAL;
	}

	atomic_set(&dev_data->tx_dma_desc_cnt, dev_data->tx_queue_depth);

	ret = of_property_read_u32(node, "ti,tx-submit-queue",
				   &dev_data->tx_submit_qid);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,tx-submit-queue\" parameter\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "ti,tx-completion-queue",
				   &dev_data->tx_compl_qid);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,tx-completion-queue\" parameter\n");
		return -EINVAL;
	}

	ret = of_property_read_string(node, "ti,rx-channel",
				      &dev_data->rx_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,rx-channel\" parameter\n");
		return -EINVAL;
	}

	ret = of_property_read_u32_array(node, "ti,rx-queue-depth",
					 dev_data->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,rx-queue-depth\" parameter\n");
		return -EINVAL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
		dev_dbg(dev, "rx-queue-depth[%d]= %u\n", i,
			dev_data->rx_queue_depths[i]);

	atomic_set(&dev_data->rx_dma_page_cnt, 0);

	ret = of_property_read_u32(node, "ti,rx-compl-queue",
				   &dev_data->rx_compl_qid);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,rx-compl-queue\" parameter\n");
		return -EINVAL;
	}

	ret = of_property_read_u32_array(node, "ti,tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,tx-pool\" parameter\n");
		return -EINVAL;
	}
	dev_data->tx_pool_size = temp[0];
	dev_data->tx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node, "ti,rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,rx-pool\" parameter\n");
		return -EINVAL;
	}
	dev_data->rx_pool_size = temp[0];
	dev_data->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node, "ti,sc-id", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"ti,sc-id\" parameter\n");
		return -EINVAL;
	}
	dev_data->sc_id_start = temp[0];
	dev_data->sc_id_end = temp[1];
	dev_data->sc_id = dev_data->sc_id_start;

	dev_data->sa_regmap = syscon_regmap_lookup_by_phandle(node,
							      "syscon-subsys");
	if (IS_ERR(dev_data->sa_regmap)) {
		dev_err(dev, "syscon_regmap_lookup_by_phandle failed\n");
		return -EINVAL;
	}

	return 0;
}
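
/*
 * For reference, the properties parsed above map onto a device-tree node
 * shaped roughly as follows. This is an illustrative sketch only; the
 * channel names, queue ids, pool sizes and security-context id range are
 * placeholders, not values from a real board file:
 *
 *	crypto@0 {
 *		compatible = "ti,netcp-sa-crypto";
 *		syscon-subsys = <&sa_subsys>;
 *		ti,tx-channel = "crypto-tx";
 *		ti,tx-queue-depth = <128>;
 *		ti,tx-submit-queue = <646>;
 *		ti,tx-completion-queue = <647>;
 *		ti,rx-channel = "crypto-rx";
 *		ti,rx-queue-depth = <256 64 0 0>;
 *		ti,rx-compl-queue = <648>;
 *		ti,tx-pool = <512 12>;
 *		ti,rx-pool = <512 12>;
 *		ti,sc-id = <0x7000 0x71ff>;
 *	};
 */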

static int keystone_crypto_remove(struct platform_device *pdev)
{
	struct keystone_crypto_data *dev_data = platform_get_drvdata(pdev);
	int ret = 0;

	/* un-register crypto algorithms */
	sa_unregister_algos(&pdev->dev);
	/* Delete SYSFS entries */
	sa_delete_sysfs_entries(dev_data);
	/* Release DMA resources */
	ret = sa_free_resources(dev_data);
	/* Kill tasklets */
	tasklet_kill(&dev_data->rx_task);
	tasklet_kill(&dev_data->tx_task);
	/* Free memory pools used by the driver */
	dma_pool_destroy(dev_data->sc_pool);
	kmem_cache_destroy(dev_data->dma_req_ctx_cache);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int sa_request_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "sa_mci.fw", dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed\n");
		return ret;
	}

	memcpy(&sa_mci_tbl, fw->data, fw->size);
	release_firmware(fw);
	return 0;
}
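
/*
 * The "sa_mci.fw" image is fetched through the regular kernel firmware
 * loader (typically from /lib/firmware). Note that the memcpy() above
 * trusts fw->size and assumes the image never exceeds sa_mci_tbl, which
 * is declared elsewhere in the driver.
 */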

static int keystone_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct keystone_crypto_data *dev_data;
	u32 value;
	int ret;

	sa_ks2_dev = dev;

	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	dev_data->pdev = pdev;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable SA power-domain\n");
		pm_runtime_disable(dev);
		return ret;
	}

	/* Read configuration from device tree */
	ret = sa_read_dtb(node, dev_data);
	if (ret) {
		dev_err(dev, "Failed to get all relevant configurations from DTB...\n");
		return ret;
	}

	/* Initialize both tasklets before any error path that kills them */
	tasklet_init(&dev_data->rx_task, sa_rx_task, (unsigned long)dev_data);
	tasklet_init(&dev_data->tx_task, sa_tx_task, (unsigned long)dev_data);

	/* Enable the required sub-modules in SA */
	ret = regmap_read(dev_data->sa_regmap, SA_CMD_STATUS_OFS, &value);
	if (ret)
		goto err_1;

	value |= (SA_CMD_ENCSS_EN | SA_CMD_AUTHSS_EN |
		  SA_CMD_CTXCACH_EN | SA_CMD_SA1_IN_EN |
		  SA_CMD_SA0_IN_EN | SA_CMD_SA1_OUT_EN |
		  SA_CMD_SA0_OUT_EN);

	ret = regmap_write(dev_data->sa_regmap, SA_CMD_STATUS_OFS, value);
	if (ret)
		goto err_1;

	/* Initialize statistic counters */
	atomic_set(&dev_data->stats.tx_dropped, 0);
	atomic_set(&dev_data->stats.sc_tear_dropped, 0);
	atomic_set(&dev_data->stats.tx_pkts, 0);
	atomic_set(&dev_data->stats.rx_pkts, 0);

	/* Initialize memory pools used by the driver */
	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
					    SA_CTX_MAX_SZ, 64, 0);
	if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool");
		ret = -ENOMEM;
		goto err_1;
	}

	/* Create a cache for Tx DMA request context */
	dev_data->dma_req_ctx_cache = KMEM_CACHE(sa_dma_req_ctx, 0);
	if (!dev_data->dma_req_ctx_cache) {
		dev_err(dev, "Failed to create dma req cache");
		ret = -ENOMEM;
		goto err_2;
	}

	/* Setup DMA channels */
	ret = sa_setup_dma(dev_data);
	if (ret) {
		dev_err(dev, "Failed to set DMA channels");
		goto err_3;
	}

	/* Initialize the SC-ID allocation lock */
	spin_lock_init(&dev_data->scid_lock);

	/* Create sysfs entries */
	ret = sa_create_sysfs_entries(dev_data);
	if (ret)
		goto err_3;

	ret = sa_request_firmware(dev);
	if (ret < 0)
		goto err_3;

	platform_set_drvdata(pdev, dev_data);

	/* Register crypto algorithms */
	sa_register_algos(dev);
	dev_info(dev, "crypto accelerator enabled\n");
	return 0;

err_3:
	kmem_cache_destroy(dev_data->dma_req_ctx_cache);
err_2:
	dma_pool_destroy(dev_data->sc_pool);
err_1:
	tasklet_kill(&dev_data->rx_task);
	tasklet_kill(&dev_data->tx_task);
	return ret;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-sa-crypto", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver keystone_crypto_driver = {
	.probe	= keystone_crypto_probe,
	.remove	= keystone_crypto_remove,
	.driver	= {
		.name		= "keystone-crypto",
		.of_match_table	= of_match,
	},
};
module_platform_driver(keystone_crypto_driver);

MODULE_DESCRIPTION("Keystone crypto acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sandeep Nair");
MODULE_AUTHOR("Vitaly Andrianov");