ccp-pci.c

/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

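/* Maximum number of MSI-X vectors the driver will request per device. */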
#define MSIX_VECTORS			2

struct ccp_msix {
	u32 vector;
	char name[16];
};

struct ccp_pci {
	int msix_count;
	struct ccp_msix msix[MSIX_VECTORS];
};

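/*
 * Request up to MSIX_VECTORS MSI-X vectors and attach the version-specific
 * interrupt handler to each, enabling tasklet-based bottom halves.
 */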
static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct msix_entry msix_entry[MSIX_VECTORS];
	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
		msix_entry[v].entry = v;

	ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
	if (ret < 0)
		return ret;

	ccp_pci->msix_count = ret;
	for (v = 0; v < ccp_pci->msix_count; v++) {
		/* Set the interrupt names and request the irqs */
		snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
			 ccp->name, v);
		ccp_pci->msix[v].vector = msix_entry[v].vector;
		ret = request_irq(ccp_pci->msix[v].vector,
				  ccp->vdata->perform->irqhandler,
				  0, ccp_pci->msix[v].name, dev);
		if (ret) {
			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
				   ret);
			goto e_irq;
		}
	}
	ccp->use_tasklet = true;

	return 0;

e_irq:
	while (v--)
		free_irq(ccp_pci->msix[v].vector, dev);

	pci_disable_msix(pdev);

	ccp_pci->msix_count = 0;

	return ret;
}

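/* Fall back to a single MSI interrupt when MSI-X is unavailable. */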
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	ccp->irq = pdev->irq;
	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
			  ccp->name, dev);
	if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
		goto e_msi;
	}
	ccp->use_tasklet = true;

	return 0;

e_msi:
	pci_disable_msi(pdev);

	return ret;
}

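/* Prefer MSI-X; fall back to plain MSI if that fails. */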
static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_msix_irqs(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = ccp_get_msi_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

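/* Release whichever interrupt resources (MSI-X or MSI) were acquired. */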
static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (ccp_pci->msix_count) {
		while (ccp_pci->msix_count--)
			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
				 dev);
		pci_disable_msix(pdev);
	} else if (ccp->irq) {
		free_irq(ccp->irq, dev);
		pci_disable_msi(pdev);
	}
	ccp->irq = 0;
}

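/*
 * Check that the BAR named in the version data is a memory resource large
 * enough to cover the register block (vdata offset plus 0x800 bytes) and
 * return its index, or -EIO if it is unusable.
 */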
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	resource_size_t io_len;
	unsigned long io_flags;

	io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
	io_len = pci_resource_len(pdev, ccp->vdata->bar);
	if ((io_flags & IORESOURCE_MEM) &&
	    (io_len >= (ccp->vdata->offset + 0x800)))
		return ccp->vdata->bar;

	return -EIO;
}

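/*
 * Probe: allocate the device structure, map the MMIO registers, set a
 * 48-bit DMA mask (falling back to 32-bit) and run the version-specific
 * init routine.
 */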
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci)
		goto e_err;

	ccp->dev_specific = ccp_pci;
	ccp->vdata = (struct ccp_vdata *)id->driver_data;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_err;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (!ccp->io_map) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	ccp->io_regs = ccp->io_map + ccp->vdata->offset;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_iomap;
		}
	}

	dev_set_drvdata(dev, ccp);

	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_iomap;

	dev_notice(dev, "enabled\n");

	return 0;

e_iomap:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

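/* Tear down in the reverse order of ccp_pci_probe(). */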
static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	dev_notice(dev, "disabled\n");
}

#ifdef CONFIG_PM
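/*
 * Legacy PCI power-management hooks: quiesce the command-queue kthreads
 * on suspend and wake them back up on resume.
 */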
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}

#endif

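/* Supported devices: the v3 CCP and two v5 variants, selected by device ID. */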
static const struct pci_device_id ccp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
	.name = "ccp",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};

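/*
 * Registration entry points; these appear to be called from the CCP core
 * module init/exit code rather than via module_pci_driver().
 */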
int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}