cppi41.c

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "dmaengine.h"
#define DESC_TYPE               27
#define DESC_TYPE_HOST          0x10
#define DESC_TYPE_TEARD         0x13

#define TD_DESC_IS_RX           (1 << 16)
#define TD_DESC_DMA_NUM         10

#define DESC_LENGTH_BITS_NUM    21

#define DESC_TYPE_USB           (5 << 26)
#define DESC_PD_COMPLETE        (1 << 31)

/* DMA engine */
#define DMA_TDFDQ               4
#define DMA_TXGCR(x)            (0x800 + (x) * 0x20)
#define DMA_RXGCR(x)            (0x808 + (x) * 0x20)
#define RXHPCRA0                4

#define GCR_CHAN_ENABLE         (1 << 31)
#define GCR_TEARDOWN            (1 << 30)
#define GCR_STARV_RETRY         (1 << 24)
#define GCR_DESC_TYPE_HOST      (1 << 14)

/* DMA scheduler */
#define DMA_SCHED_CTRL          0
#define DMA_SCHED_CTRL_EN       (1 << 31)
#define DMA_SCHED_WORD(x)       ((x) * 4 + 0x800)

#define SCHED_ENTRY0_CHAN(x)    ((x) << 0)
#define SCHED_ENTRY0_IS_RX      (1 << 7)

#define SCHED_ENTRY1_CHAN(x)    ((x) << 8)
#define SCHED_ENTRY1_IS_RX      (1 << 15)

#define SCHED_ENTRY2_CHAN(x)    ((x) << 16)
#define SCHED_ENTRY2_IS_RX      (1 << 23)

#define SCHED_ENTRY3_CHAN(x)    ((x) << 24)
#define SCHED_ENTRY3_IS_RX      (1 << 31)
/* Queue manager */
/* 4 KiB of memory for descriptors, 2 for each endpoint */
#define ALLOC_DECS_NUM          128
#define DESCS_AREAS             1
#define TOTAL_DESCS_NUM         (ALLOC_DECS_NUM * DESCS_AREAS)
#define QMGR_SCRATCH_SIZE       (TOTAL_DESCS_NUM * 4)

#define QMGR_LRAM0_BASE         0x80
#define QMGR_LRAM_SIZE          0x84
#define QMGR_LRAM1_BASE         0x88
#define QMGR_MEMBASE(x)         (0x1000 + (x) * 0x10)
#define QMGR_MEMCTRL(x)         (0x1004 + (x) * 0x10)
#define QMGR_MEMCTRL_IDX_SH     16
#define QMGR_MEMCTRL_DESC_SH    8

#define QMGR_PEND(x)            (0x90 + (x) * 4)

#define QMGR_PENDING_SLOT_Q(x)  ((x) / 32)
#define QMGR_PENDING_BIT_Q(x)   ((x) % 32)

#define QMGR_QUEUE_A(n)         (0x2000 + (n) * 0x10)
#define QMGR_QUEUE_B(n)         (0x2004 + (n) * 0x10)
#define QMGR_QUEUE_C(n)         (0x2008 + (n) * 0x10)
#define QMGR_QUEUE_D(n)         (0x200c + (n) * 0x10)
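/*
 * Note on the queue manager interface (inferred from the register use
 * below): each hardware queue is exposed through a QMGR_QUEUE_A..D
 * window, of which this driver only touches register D. Register D
 * carries the descriptor pointer: writing a 32-byte-aligned descriptor
 * address (with the descriptor size encoded in the low five bits)
 * pushes the descriptor onto the queue, and reading it pops the head
 * descriptor; see cppi41_pop_desc() and push_desc_queue().
 */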
/* Packet Descriptor */
#define PD2_ZERO_LENGTH         (1 << 19)
struct cppi41_channel {
        struct dma_chan chan;
        struct dma_async_tx_descriptor txd;
        struct cppi41_dd *cdd;
        struct cppi41_desc *desc;
        dma_addr_t desc_phys;
        void __iomem *gcr_reg;
        int is_tx;
        u32 residue;

        unsigned int q_num;
        unsigned int q_comp_num;
        unsigned int port_num;

        unsigned td_retry;
        unsigned td_queued:1;
        unsigned td_seen:1;
        unsigned td_desc_seen:1;

        struct list_head node;          /* Node for pending list */
};

struct cppi41_desc {
        u32 pd0;
        u32 pd1;
        u32 pd2;
        u32 pd3;
        u32 pd4;
        u32 pd5;
        u32 pd6;
        u32 pd7;
} __aligned(32);
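/*
 * Layout note (a sketch based on how the pd words are filled in
 * cppi41_dma_prep_slave_sg() and consumed in the IRQ handler): pd0
 * carries the descriptor type and transfer length, pd2 the USB
 * descriptor type and the completion queue, pd4/pd7 the buffer
 * address, and pd6 the original buffer length plus DESC_PD_COMPLETE.
 * On completion the hardware appears to flag zero-length packets via
 * PD2_ZERO_LENGTH in pd2. The struct must stay exactly 32 bytes and
 * 32-byte aligned, which init_descs() asserts with BUILD_BUG_ON().
 */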
struct chan_queues {
        u16 submit;
        u16 complete;
};

struct cppi41_dd {
        struct dma_device ddev;

        void *qmgr_scratch;
        dma_addr_t scratch_phys;

        struct cppi41_desc *cd;
        dma_addr_t descs_phys;
        u32 first_td_desc;
        struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];

        void __iomem *ctrl_mem;
        void __iomem *sched_mem;
        void __iomem *qmgr_mem;
        unsigned int irq;
        const struct chan_queues *queues_rx;
        const struct chan_queues *queues_tx;
        struct chan_queues td_queue;
        u16 first_completion_queue;
        u16 qmgr_num_pend;
        u32 n_chans;
        u8 platform;

        struct list_head pending;       /* Pending queued transfers */
        spinlock_t lock;                /* Lock for pending list */

        /* context for suspend/resume */
        unsigned int dma_tdfdq;

        bool is_suspended;
};
static const struct chan_queues am335x_usb_queues_tx[] = {
        /* USB0 ENDP 1 */
        [ 0] = { .submit = 32, .complete =  93},
        [ 1] = { .submit = 34, .complete =  94},
        [ 2] = { .submit = 36, .complete =  95},
        [ 3] = { .submit = 38, .complete =  96},
        [ 4] = { .submit = 40, .complete =  97},
        [ 5] = { .submit = 42, .complete =  98},
        [ 6] = { .submit = 44, .complete =  99},
        [ 7] = { .submit = 46, .complete = 100},
        [ 8] = { .submit = 48, .complete = 101},
        [ 9] = { .submit = 50, .complete = 102},
        [10] = { .submit = 52, .complete = 103},
        [11] = { .submit = 54, .complete = 104},
        [12] = { .submit = 56, .complete = 105},
        [13] = { .submit = 58, .complete = 106},
        [14] = { .submit = 60, .complete = 107},

        /* USB1 ENDP1 */
        [15] = { .submit = 62, .complete = 125},
        [16] = { .submit = 64, .complete = 126},
        [17] = { .submit = 66, .complete = 127},
        [18] = { .submit = 68, .complete = 128},
        [19] = { .submit = 70, .complete = 129},
        [20] = { .submit = 72, .complete = 130},
        [21] = { .submit = 74, .complete = 131},
        [22] = { .submit = 76, .complete = 132},
        [23] = { .submit = 78, .complete = 133},
        [24] = { .submit = 80, .complete = 134},
        [25] = { .submit = 82, .complete = 135},
        [26] = { .submit = 84, .complete = 136},
        [27] = { .submit = 86, .complete = 137},
        [28] = { .submit = 88, .complete = 138},
        [29] = { .submit = 90, .complete = 139},
};
static const struct chan_queues am335x_usb_queues_rx[] = {
        /* USB0 ENDP 1 */
        [ 0] = { .submit =  1, .complete = 109},
        [ 1] = { .submit =  2, .complete = 110},
        [ 2] = { .submit =  3, .complete = 111},
        [ 3] = { .submit =  4, .complete = 112},
        [ 4] = { .submit =  5, .complete = 113},
        [ 5] = { .submit =  6, .complete = 114},
        [ 6] = { .submit =  7, .complete = 115},
        [ 7] = { .submit =  8, .complete = 116},
        [ 8] = { .submit =  9, .complete = 117},
        [ 9] = { .submit = 10, .complete = 118},
        [10] = { .submit = 11, .complete = 119},
        [11] = { .submit = 12, .complete = 120},
        [12] = { .submit = 13, .complete = 121},
        [13] = { .submit = 14, .complete = 122},
        [14] = { .submit = 15, .complete = 123},

        /* USB1 ENDP 1 */
        [15] = { .submit = 16, .complete = 141},
        [16] = { .submit = 17, .complete = 142},
        [17] = { .submit = 18, .complete = 143},
        [18] = { .submit = 19, .complete = 144},
        [19] = { .submit = 20, .complete = 145},
        [20] = { .submit = 21, .complete = 146},
        [21] = { .submit = 22, .complete = 147},
        [22] = { .submit = 23, .complete = 148},
        [23] = { .submit = 24, .complete = 149},
        [24] = { .submit = 25, .complete = 150},
        [25] = { .submit = 26, .complete = 151},
        [26] = { .submit = 27, .complete = 152},
        [27] = { .submit = 28, .complete = 153},
        [28] = { .submit = 29, .complete = 154},
        [29] = { .submit = 30, .complete = 155},
};
static const struct chan_queues da8xx_usb_queues_tx[] = {
        [0] = { .submit = 16, .complete = 24},
        [1] = { .submit = 18, .complete = 24},
        [2] = { .submit = 20, .complete = 24},
        [3] = { .submit = 22, .complete = 24},
};

static const struct chan_queues da8xx_usb_queues_rx[] = {
        [0] = { .submit = 1, .complete = 26},
        [1] = { .submit = 3, .complete = 26},
        [2] = { .submit = 5, .complete = 26},
        [3] = { .submit = 7, .complete = 26},
};
struct cppi_glue_infos {
        const struct chan_queues *queues_rx;
        const struct chan_queues *queues_tx;
        struct chan_queues td_queue;
        u16 first_completion_queue;
        u16 qmgr_num_pend;
};
static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
        return container_of(c, struct cppi41_channel, chan);
}

static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
        struct cppi41_channel *c;
        u32 descs_size;
        u32 desc_num;

        descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;

        if (!((desc >= cdd->descs_phys) &&
                        (desc < (cdd->descs_phys + descs_size)))) {
                return NULL;
        }

        desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
        BUG_ON(desc_num >= ALLOC_DECS_NUM);
        c = cdd->chan_busy[desc_num];
        cdd->chan_busy[desc_num] = NULL;

        /* Usecount for chan_busy[], paired with push_desc_queue() */
        pm_runtime_put(cdd->ddev.dev);

        return c;
}
static void cppi_writel(u32 val, void __iomem *mem)
{
        __raw_writel(val, mem);
}

static u32 cppi_readl(void __iomem *mem)
{
        return __raw_readl(mem);
}
static u32 pd_trans_len(u32 val)
{
        return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}
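/*
 * Pop the head descriptor off a queue by reading the queue's D
 * register. The low five bits of the returned word carry the
 * descriptor size field (as encoded on the push side), so they are
 * masked off to recover the 32-byte-aligned descriptor address; a
 * result of zero means the queue was empty.
 */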
static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
        u32 desc;

        desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
        desc &= ~0x1f;
        return desc;
}
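/*
 * Interrupt handler: scan the queue manager pending registers,
 * starting with the slot that holds the first completion queue. Bits
 * below the first completion queue in that slot belong to submit
 * queues and are masked out; every remaining set bit identifies a
 * completion queue with at least one finished descriptor to pop.
 */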
static irqreturn_t cppi41_irq(int irq, void *data)
{
        struct cppi41_dd *cdd = data;
        u16 first_completion_queue = cdd->first_completion_queue;
        u16 qmgr_num_pend = cdd->qmgr_num_pend;
        struct cppi41_channel *c;
        int i;

        for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
             i++) {
                u32 val;
                u32 q_num;

                val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
                if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
                        u32 mask;

                        /* set the bit for the first completion queue */
                        mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
                        /* now all bits below the first completion queue are set */
                        mask--;
                        /* invert so only the completion queues remain set */
                        val &= ~mask;
                }
                if (val)
                        __iormb();
                while (val) {
                        u32 desc, len;

                        /*
                         * This should never trigger, see the comments in
                         * push_desc_queue()
                         */
                        WARN_ON(cdd->is_suspended);

                        q_num = __fls(val);
                        val &= ~(1 << q_num);
                        q_num += 32 * i;
                        desc = cppi41_pop_desc(cdd, q_num);
                        c = desc_to_chan(cdd, desc);
                        if (WARN_ON(!c)) {
                                pr_err("%s() q %d desc %08x\n", __func__,
                                       q_num, desc);
                                continue;
                        }

                        if (c->desc->pd2 & PD2_ZERO_LENGTH)
                                len = 0;
                        else
                                len = pd_trans_len(c->desc->pd0);

                        c->residue = pd_trans_len(c->desc->pd6) - len;
                        dma_cookie_complete(&c->txd);
                        dmaengine_desc_get_callback_invoke(&c->txd, NULL);
                }
        }
        return IRQ_HANDLED;
}
static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
{
        dma_cookie_t cookie;

        cookie = dma_cookie_assign(tx);

        return cookie;
}
static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
        int error;

        error = pm_runtime_get_sync(cdd->ddev.dev);
        if (error < 0) {
                dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
                        __func__, error);
                pm_runtime_put_noidle(cdd->ddev.dev);

                return error;
        }

        dma_cookie_init(chan);
        dma_async_tx_descriptor_init(&c->txd, chan);
        c->txd.tx_submit = cppi41_tx_submit;

        if (!c->is_tx)
                cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

        pm_runtime_mark_last_busy(cdd->ddev.dev);
        pm_runtime_put_autosuspend(cdd->ddev.dev);

        return 0;
}
static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
        int error;

        error = pm_runtime_get_sync(cdd->ddev.dev);
        if (error < 0) {
                pm_runtime_put_noidle(cdd->ddev.dev);

                return;
        }

        WARN_ON(!list_empty(&cdd->pending));

        pm_runtime_mark_last_busy(cdd->ddev.dev);
        pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct cppi41_channel *c = to_cpp41_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);

        dma_set_residue(txstate, c->residue);

        return ret;
}
static void push_desc_queue(struct cppi41_channel *c)
{
        struct cppi41_dd *cdd = c->cdd;
        u32 desc_num;
        u32 desc_phys;
        u32 reg;

        c->residue = 0;

        reg = GCR_CHAN_ENABLE;
        if (!c->is_tx) {
                reg |= GCR_STARV_RETRY;
                reg |= GCR_DESC_TYPE_HOST;
                reg |= c->q_comp_num;
        }

        cppi_writel(reg, c->gcr_reg);

        /*
         * We don't use writel() but __raw_writel(), so we have to make
         * sure that the update to the DMA descriptor in coherent memory
         * made it to main memory before starting the dma engine.
         */
        __iowmb();

        /*
         * DMA transfers can take at least 200ms to complete with USB mass
         * storage connected. To prevent autosuspend timeouts, we must use
         * pm_runtime_get/put() when chan_busy[] is modified. This will get
         * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
         * outcome of the transfer.
         */
        pm_runtime_get(cdd->ddev.dev);

        desc_phys = lower_32_bits(c->desc_phys);
        desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
        WARN_ON(cdd->chan_busy[desc_num]);
        cdd->chan_busy[desc_num] = c;

        /* push the descriptor: its address plus the size field in the low bits */
        reg = (sizeof(struct cppi41_desc) - 24) / 4;
        reg |= desc_phys;
        cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}

/*
 * Caller must hold cdd->lock to prevent push_desc_queue()
 * getting called out of order. Both cppi41_dma_issue_pending()
 * and cppi41_runtime_resume() call this function.
 */
static void cppi41_run_queue(struct cppi41_dd *cdd)
{
        struct cppi41_channel *c, *_c;

        list_for_each_entry_safe(c, _c, &cdd->pending, node) {
                push_desc_queue(c);
                list_del(&c->node);
        }
}
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
        unsigned long flags;
        int error;

        error = pm_runtime_get(cdd->ddev.dev);
        if ((error != -EINPROGRESS) && error < 0) {
                pm_runtime_put_noidle(cdd->ddev.dev);
                dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
                        error);

                return;
        }

        spin_lock_irqsave(&cdd->lock, flags);
        list_add_tail(&c->node, &cdd->pending);

        if (!cdd->is_suspended)
                cppi41_run_queue(cdd);

        spin_unlock_irqrestore(&cdd->lock, flags);

        pm_runtime_mark_last_busy(cdd->ddev.dev);
        pm_runtime_put_autosuspend(cdd->ddev.dev);
}
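/*
 * The helpers below assemble the eight 32-bit words of a host packet
 * descriptor. In short (matching the layout note next to struct
 * cppi41_desc): pd0 = descriptor type plus transfer length, pd2 = USB
 * descriptor type plus the completion queue the descriptor returns on,
 * pd3 = packet size, pd4 and pd7 = buffer address, pd6 =
 * DESC_PD_COMPLETE plus the original buffer length, from which the
 * residue is computed on completion.
 */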
static u32 get_host_pd0(u32 length)
{
        u32 reg;

        reg = DESC_TYPE_HOST << DESC_TYPE;
        reg |= length;

        return reg;
}

static u32 get_host_pd1(struct cppi41_channel *c)
{
        u32 reg;

        reg = 0;

        return reg;
}

static u32 get_host_pd2(struct cppi41_channel *c)
{
        u32 reg;

        reg = DESC_TYPE_USB;
        reg |= c->q_comp_num;

        return reg;
}

static u32 get_host_pd3(u32 length)
{
        u32 reg;

        /* PD3 = packet size */
        reg = length;

        return reg;
}

static u32 get_host_pd6(u32 length)
{
        u32 reg;

        /* PD6 buffer size */
        reg = DESC_PD_COMPLETE;
        reg |= length;

        return reg;
}

static u32 get_host_pd4_or_7(u32 addr)
{
        u32 reg;

        reg = addr;

        return reg;
}

static u32 get_host_pd5(void)
{
        u32 reg;

        reg = 0;

        return reg;
}
static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_desc *d;
        struct scatterlist *sg;
        unsigned int i;

        d = c->desc;
        for_each_sg(sgl, sg, sg_len, i) {
                u32 addr;
                u32 len;

                /* We need to use more than one desc once musb supports sg */
                addr = lower_32_bits(sg_dma_address(sg));
                len = sg_dma_len(sg);

                d->pd0 = get_host_pd0(len);
                d->pd1 = get_host_pd1(c);
                d->pd2 = get_host_pd2(c);
                d->pd3 = get_host_pd3(len);
                d->pd4 = get_host_pd4_or_7(addr);
                d->pd5 = get_host_pd5();
                d->pd6 = get_host_pd6(len);
                d->pd7 = get_host_pd4_or_7(addr);

                d++;
        }

        return &c->txd;
}
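/*
 * Client-side usage follows the standard dmaengine flow. A minimal
 * sketch (variable names such as buf_dma and my_complete are
 * illustrative, not part of this driver):
 *
 *      struct dma_async_tx_descriptor *d;
 *
 *      d = dmaengine_prep_slave_single(chan, buf_dma, len,
 *                                      DMA_MEM_TO_DEV, 0);
 *      d->callback = my_complete;      // invoked from cppi41_irq()
 *      dmaengine_submit(d);
 *      dma_async_issue_pending(chan);
 */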
static void cppi41_compute_td_desc(struct cppi41_desc *d)
{
        d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}
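/*
 * Tearing down a channel is a small state machine: queue a teardown
 * descriptor and set GCR_TEARDOWN once (td_queued), then poll until
 * both the in-flight transfer descriptor (td_desc_seen) and the
 * teardown descriptor (td_seen) have come back on the completion
 * queues, retrying up to td_retry times. The caller is expected to
 * call again on -EAGAIN.
 */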
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
        struct dmaengine_result abort_result;
        struct cppi41_dd *cdd = c->cdd;
        struct cppi41_desc *td;
        u32 reg;
        u32 desc_phys;
        u32 td_desc_phys;

        td = cdd->cd;
        td += cdd->first_td_desc;

        td_desc_phys = cdd->descs_phys;
        td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);

        if (!c->td_queued) {
                cppi41_compute_td_desc(td);
                __iowmb();

                reg = (sizeof(struct cppi41_desc) - 24) / 4;
                reg |= td_desc_phys;
                cppi_writel(reg, cdd->qmgr_mem +
                                QMGR_QUEUE_D(cdd->td_queue.submit));

                reg = GCR_CHAN_ENABLE;
                if (!c->is_tx) {
                        reg |= GCR_STARV_RETRY;
                        reg |= GCR_DESC_TYPE_HOST;
                        reg |= cdd->td_queue.complete;
                }
                reg |= GCR_TEARDOWN;
                cppi_writel(reg, c->gcr_reg);
                c->td_queued = 1;
                c->td_retry = 500;
        }

        if (!c->td_seen || !c->td_desc_seen) {

                desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
                if (!desc_phys && c->is_tx)
                        desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);

                if (desc_phys == c->desc_phys) {
                        c->td_desc_seen = 1;

                } else if (desc_phys == td_desc_phys) {
                        u32 pd0;

                        __iormb();
                        pd0 = td->pd0;
                        WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
                        WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
                        WARN_ON((pd0 & 0x1f) != c->port_num);
                        c->td_seen = 1;
                } else if (desc_phys) {
                        WARN_ON_ONCE(1);
                }
        }
        c->td_retry--;
        /*
         * If the TX descriptor / channel is in use, the caller needs to poke
         * the TD bit multiple times. After that the hardware releases the
         * transfer descriptor followed by the TD descriptor. Waiting seems
         * not to make any difference.
         * RX seems to be thrown out right away. However, once the TearDown
         * descriptor gets through, we are done. If the TearDown descriptor
         * came back before the transfer descriptor did, the transfer
         * descriptor has to be waiting on the submit queue and is fetched
         * from there below.
         */
        if (!c->td_seen && c->td_retry) {
                udelay(1);
                return -EAGAIN;
        }
        WARN_ON(!c->td_retry);

        if (!c->td_desc_seen) {
                desc_phys = cppi41_pop_desc(cdd, c->q_num);
                if (!desc_phys)
                        desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
                WARN_ON(!desc_phys);
        }

        c->td_queued = 0;
        c->td_seen = 0;
        c->td_desc_seen = 0;
        cppi_writel(0, c->gcr_reg);

        /* Invoke the callback to do the necessary clean-up */
        abort_result.result = DMA_TRANS_ABORTED;
        dma_cookie_complete(&c->txd);
        dmaengine_desc_get_callback_invoke(&c->txd, &abort_result);

        return 0;
}
static int cppi41_stop_chan(struct dma_chan *chan)
{
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
        u32 desc_num;
        u32 desc_phys;
        int ret;

        desc_phys = lower_32_bits(c->desc_phys);
        desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
        if (!cdd->chan_busy[desc_num])
                return 0;

        ret = cppi41_tear_down_chan(c);
        if (ret)
                return ret;

        WARN_ON(!cdd->chan_busy[desc_num]);
        cdd->chan_busy[desc_num] = NULL;

        /* Usecount for chan_busy[], paired with push_desc_queue() */
        pm_runtime_put(cdd->ddev.dev);

        return 0;
}
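/*
 * Channel numbering: dmaengine channel 2n+1 is the TX side and
 * channel 2n the RX side of hardware channel n, which is why the GCR
 * register and port number are derived from i >> 1 below. The
 * descriptor right after the per-channel ones (first_td_desc) is
 * reserved for teardown.
 */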
static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
        struct cppi41_channel *cchan, *chans;
        int i;
        u32 n_chans = cdd->n_chans;

        /*
         * Each hardware channel can only be used as TX or as RX, so we
         * register twice as many dma channels: one TX and one RX channel
         * per hardware channel.
         */
        n_chans *= 2;
        chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
        if (!chans)
                return -ENOMEM;

        for (i = 0; i < n_chans; i++) {
                cchan = &chans[i];

                cchan->cdd = cdd;
                if (i & 1) {
                        cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
                        cchan->is_tx = 1;
                } else {
                        cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
                        cchan->is_tx = 0;
                }
                cchan->port_num = i >> 1;
                cchan->desc = &cdd->cd[i];
                cchan->desc_phys = cdd->descs_phys;
                cchan->desc_phys += i * sizeof(struct cppi41_desc);
                cchan->chan.device = &cdd->ddev;
                list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
        }
        cdd->first_td_desc = n_chans;

        return 0;
}
static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
        unsigned int mem_decs;
        int i;

        mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);

        for (i = 0; i < DESCS_AREAS; i++) {

                cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
                cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

                dma_free_coherent(dev, mem_decs, cdd->cd,
                                cdd->descs_phys);
        }
}
static void disable_sched(struct cppi41_dd *cdd)
{
        cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}

static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
        disable_sched(cdd);

        purge_descs(dev, cdd);

        /* clear both linking-RAM bases programmed in init_cppi41() */
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
        dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
                        cdd->scratch_phys);
}
static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
        unsigned int desc_size;
        unsigned int mem_decs;
        int i;
        u32 reg;
        u32 idx;

        BUILD_BUG_ON(sizeof(struct cppi41_desc) &
                        (sizeof(struct cppi41_desc) - 1));
        BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
        BUILD_BUG_ON(ALLOC_DECS_NUM < 32);

        desc_size = sizeof(struct cppi41_desc);
        mem_decs = ALLOC_DECS_NUM * desc_size;

        idx = 0;
        for (i = 0; i < DESCS_AREAS; i++) {

                reg = idx << QMGR_MEMCTRL_IDX_SH;
                reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
                reg |= ilog2(ALLOC_DECS_NUM) - 5;

                BUILD_BUG_ON(DESCS_AREAS != 1);
                cdd->cd = dma_alloc_coherent(dev, mem_decs,
                                &cdd->descs_phys, GFP_KERNEL);
                if (!cdd->cd)
                        return -ENOMEM;

                cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
                cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));

                idx += ALLOC_DECS_NUM;
        }
        return 0;
}
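/*
 * Each 32-bit scheduler word holds four one-byte entries, each a
 * channel number plus an IS_RX flag. The loop below therefore covers
 * two hardware channels per word, scheduling each once for TX and
 * once for RX, and finally enables the scheduler with the index of
 * the last entry programmed into the control register.
 */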
static void init_sched(struct cppi41_dd *cdd)
{
        unsigned ch;
        unsigned word;
        u32 reg;

        word = 0;
        cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
        for (ch = 0; ch < cdd->n_chans; ch += 2) {

                reg = SCHED_ENTRY0_CHAN(ch);
                reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

                reg |= SCHED_ENTRY2_CHAN(ch + 1);
                reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
                cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
                word++;
        }
        reg = cdd->n_chans * 2 - 1;
        reg |= DMA_SCHED_CTRL_EN;
        cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
        int ret;

        BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
        cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
                        &cdd->scratch_phys, GFP_KERNEL);
        if (!cdd->qmgr_scratch)
                return -ENOMEM;

        cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
        cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

        ret = init_descs(dev, cdd);
        if (ret)
                goto err_td;

        cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
        init_sched(cdd);

        return 0;
err_td:
        deinit_cppi41(dev, cdd);
        return ret;
}
static struct platform_driver cpp41_dma_driver;
/*
 * The param format is:
 * X Y
 * X: Port
 * Y: 0 = RX else TX
 */
#define INFO_PORT       0
#define INFO_IS_TX      1
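/*
 * A consumer normally gets a channel through the DT binding and
 * cppi41_dma_xlate() below, but the filter can also be used directly.
 * A minimal sketch (the mask and param variables are illustrative):
 *
 *      dma_cap_mask_t mask;
 *      u32 param[2] = { 2, 1 };        // port 2, TX
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, cpp41_dma_filter_fn, param);
 */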
static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
        struct cppi41_channel *cchan;
        struct cppi41_dd *cdd;
        const struct chan_queues *queues;
        u32 *num = param;

        if (chan->device->dev->driver != &cpp41_dma_driver.driver)
                return false;

        cchan = to_cpp41_chan(chan);
        if (cchan->port_num != num[INFO_PORT])
                return false;
        if (cchan->is_tx && !num[INFO_IS_TX])
                return false;
        cdd = cchan->cdd;
        if (cchan->is_tx)
                queues = cdd->queues_tx;
        else
                queues = cdd->queues_rx;

        BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
                     ARRAY_SIZE(am335x_usb_queues_tx));
        if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
                return false;

        cchan->q_num = queues[cchan->port_num].submit;
        cchan->q_comp_num = queues[cchan->port_num].complete;
        return true;
}
static struct of_dma_filter_info cpp41_dma_info = {
        .filter_fn = cpp41_dma_filter_fn,
};

static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
                struct of_dma *ofdma)
{
        int count = dma_spec->args_count;
        struct of_dma_filter_info *info = ofdma->of_dma_data;

        if (!info || !info->filter_fn)
                return NULL;

        if (count != 2)
                return NULL;

        return dma_request_channel(info->dma_cap, info->filter_fn,
                        &dma_spec->args[0]);
}
static const struct cppi_glue_infos am335x_usb_infos = {
        .queues_rx = am335x_usb_queues_rx,
        .queues_tx = am335x_usb_queues_tx,
        .td_queue = { .submit = 31, .complete = 0 },
        .first_completion_queue = 93,
        .qmgr_num_pend = 5,
};

static const struct cppi_glue_infos da8xx_usb_infos = {
        .queues_rx = da8xx_usb_queues_rx,
        .queues_tx = da8xx_usb_queues_tx,
        .td_queue = { .submit = 31, .complete = 0 },
        .first_completion_queue = 24,
        .qmgr_num_pend = 2,
};

static const struct of_device_id cppi41_dma_ids[] = {
        { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
        { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
        {},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);

static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
        const struct of_device_id *of_id;

        of_id = of_match_node(cppi41_dma_ids, dev->of_node);
        if (!of_id)
                return NULL;
        return of_id->data;
}
#define CPPI41_DMA_BUSWIDTHS    (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static int cppi41_dma_probe(struct platform_device *pdev)
{
        struct cppi41_dd *cdd;
        struct device *dev = &pdev->dev;
        const struct cppi_glue_infos *glue_info;
        struct resource *mem;
        int index;
        int irq;
        int ret;

        glue_info = get_glue_info(dev);
        if (!glue_info)
                return -EINVAL;

        cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
        if (!cdd)
                return -ENOMEM;

        dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
        cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
        cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
        cdd->ddev.device_tx_status = cppi41_dma_tx_status;
        cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
        cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
        cdd->ddev.device_terminate_all = cppi41_stop_chan;
        cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
        cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
        cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        cdd->ddev.dev = dev;
        INIT_LIST_HEAD(&cdd->ddev.channels);
        cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

        index = of_property_match_string(dev->of_node,
                                         "reg-names", "controller");
        if (index < 0)
                return index;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
        cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
        if (IS_ERR(cdd->ctrl_mem))
                return PTR_ERR(cdd->ctrl_mem);

        mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
        cdd->sched_mem = devm_ioremap_resource(dev, mem);
        if (IS_ERR(cdd->sched_mem))
                return PTR_ERR(cdd->sched_mem);

        mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
        cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
        if (IS_ERR(cdd->qmgr_mem))
                return PTR_ERR(cdd->qmgr_mem);

        spin_lock_init(&cdd->lock);
        INIT_LIST_HEAD(&cdd->pending);

        platform_set_drvdata(pdev, cdd);

        pm_runtime_enable(dev);
        pm_runtime_set_autosuspend_delay(dev, 100);
        pm_runtime_use_autosuspend(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                goto err_get_sync;

        cdd->queues_rx = glue_info->queues_rx;
        cdd->queues_tx = glue_info->queues_tx;
        cdd->td_queue = glue_info->td_queue;
        cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
        cdd->first_completion_queue = glue_info->first_completion_queue;

        ret = of_property_read_u32(dev->of_node,
                                   "#dma-channels", &cdd->n_chans);
        if (ret)
                goto err_get_n_chans;

        ret = init_cppi41(dev, cdd);
        if (ret)
                goto err_init_cppi;

        ret = cppi41_add_chans(dev, cdd);
        if (ret)
                goto err_chans;

        irq = irq_of_parse_and_map(dev->of_node, 0);
        if (!irq) {
                ret = -EINVAL;
                goto err_chans;
        }

        ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
                        dev_name(dev), cdd);
        if (ret)
                goto err_chans;
        cdd->irq = irq;

        ret = dma_async_device_register(&cdd->ddev);
        if (ret)
                goto err_chans;

        ret = of_dma_controller_register(dev->of_node,
                        cppi41_dma_xlate, &cpp41_dma_info);
        if (ret)
                goto err_of;

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return 0;
err_of:
        dma_async_device_unregister(&cdd->ddev);
err_chans:
        deinit_cppi41(dev, cdd);
err_init_cppi:
        pm_runtime_dont_use_autosuspend(dev);
err_get_n_chans:
err_get_sync:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
        return ret;
}
static int cppi41_dma_remove(struct platform_device *pdev)
{
        struct cppi41_dd *cdd = platform_get_drvdata(pdev);
        int error;

        error = pm_runtime_get_sync(&pdev->dev);
        if (error < 0)
                dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
                        __func__, error);

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&cdd->ddev);

        devm_free_irq(&pdev->dev, cdd->irq, cdd);
        deinit_cppi41(&pdev->dev, cdd);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}
static int __maybe_unused cppi41_suspend(struct device *dev)
{
        struct cppi41_dd *cdd = dev_get_drvdata(dev);

        cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
        disable_sched(cdd);

        return 0;
}

static int __maybe_unused cppi41_resume(struct device *dev)
{
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
        struct cppi41_channel *c;
        int i;

        for (i = 0; i < DESCS_AREAS; i++)
                cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));

        list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
                if (!c->is_tx)
                        cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

        init_sched(cdd);

        cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
        cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
        cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

        return 0;
}
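/*
 * Runtime PM and the pending list: while the device is runtime
 * suspended, cppi41_dma_issue_pending() only queues the channel on
 * cdd->pending; cppi41_runtime_resume() then flushes the list under
 * cdd->lock. This is also why the IRQ handler warns if it ever runs
 * with is_suspended set.
 */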
static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
        unsigned long flags;

        spin_lock_irqsave(&cdd->lock, flags);
        cdd->is_suspended = true;
        WARN_ON(!list_empty(&cdd->pending));
        spin_unlock_irqrestore(&cdd->lock, flags);

        return 0;
}

static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
        unsigned long flags;

        spin_lock_irqsave(&cdd->lock, flags);
        cdd->is_suspended = false;
        cppi41_run_queue(cdd);
        spin_unlock_irqrestore(&cdd->lock, flags);

        return 0;
}

static const struct dev_pm_ops cppi41_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
        SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
                           cppi41_runtime_resume,
                           NULL)
};
static struct platform_driver cpp41_dma_driver = {
        .probe  = cppi41_dma_probe,
        .remove = cppi41_dma_remove,
        .driver = {
                .name = "cppi41-dma-engine",
                .pm = &cppi41_pm_ops,
                .of_match_table = of_match_ptr(cppi41_dma_ids),
        },
};

module_platform_driver(cpp41_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");