apbh_dma.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616
  1. /*
  2. * Freescale i.MX28 APBH DMA driver
  3. *
  4. * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
  5. * on behalf of DENX Software Engineering GmbH
  6. *
  7. * Based on code from LTIB:
  8. * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
  9. *
  10. * SPDX-License-Identifier: GPL-2.0+
  11. */
  12. #include <linux/list.h>
  13. #include <common.h>
  14. #include <malloc.h>
  15. #include <linux/errno.h>
  16. #include <asm/io.h>
  17. #include <asm/arch/clock.h>
  18. #include <asm/arch/imx-regs.h>
  19. #include <asm/arch/sys_proto.h>
  20. #include <asm/imx-common/dma.h>
  21. #include <asm/imx-common/regs-apbh.h>
/* Software bookkeeping state for every APBH channel, indexed by channel. */
static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
  23. /*
  24. * Test is the DMA channel is valid channel
  25. */
  26. int mxs_dma_validate_chan(int channel)
  27. {
  28. struct mxs_dma_chan *pchan;
  29. if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
  30. return -EINVAL;
  31. pchan = mxs_dma_channels + channel;
  32. if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
  33. return -EINVAL;
  34. return 0;
  35. }
  36. /*
  37. * Return the address of the command within a descriptor.
  38. */
  39. static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
  40. {
  41. return desc->address + offsetof(struct mxs_dma_desc, cmd);
  42. }
  43. /*
  44. * Read a DMA channel's hardware semaphore.
  45. *
  46. * As used by the MXS platform's DMA software, the DMA channel's hardware
  47. * semaphore reflects the number of DMA commands the hardware will process, but
  48. * has not yet finished. This is a volatile value read directly from hardware,
  49. * so it must be be viewed as immediately stale.
  50. *
  51. * If the channel is not marked busy, or has finished processing all its
  52. * commands, this value should be zero.
  53. *
  54. * See mxs_dma_append() for details on how DMA command blocks must be configured
  55. * to maintain the expected behavior of the semaphore's value.
  56. */
  57. static int mxs_dma_read_semaphore(int channel)
  58. {
  59. struct mxs_apbh_regs *apbh_regs =
  60. (struct mxs_apbh_regs *)MXS_APBH_BASE;
  61. uint32_t tmp;
  62. int ret;
  63. ret = mxs_dma_validate_chan(channel);
  64. if (ret)
  65. return ret;
  66. tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);
  67. tmp &= APBH_CHn_SEMA_PHORE_MASK;
  68. tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;
  69. return tmp;
  70. }
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Write back the cached copy of a DMA descriptor so the DMA engine,
 * which reads it straight from memory, sees the CPU's latest changes.
 *
 * The flushed range is the full rounded-up size used by
 * mxs_dma_desc_alloc(), which keeps it cache-line aligned.
 */
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
/* With the data cache disabled, memory is already coherent: no-op. */
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif
/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Nothing queued: just mark the channel busy and return. */
	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		/*
		 * Channel is already running: new work can only be handed
		 * over if the current head descriptor chains onward.
		 */
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		/* sem = number of commands hardware still owns. */
		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			/*
			 * Hardware is on its last command; repoint NXTCMDAR
			 * at the following descriptor before topping up the
			 * semaphore, so it continues into the new work.
			 */
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}

		/*
		 * Hand the pending commands to hardware via the SEMA
		 * register (on MXS, presumably this increments the hardware
		 * semaphore by the value written — confirm against the RM).
		 */
		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		/* Idle channel: program the first command and start it. */
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		/* Ungate the channel clock so the engine actually runs. */
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;

	return 0;
}
  140. /*
  141. * Disable a DMA channel.
  142. *
  143. * This function shuts down a DMA channel and marks it as "not busy." Any
  144. * descriptors on the active list are immediately moved to the head of the
  145. * "done" list, whether or not they have actually been processed by the
  146. * hardware. The "ready" flags of these descriptors are NOT cleared, so they
  147. * still appear to be active.
  148. *
  149. * This function immediately shuts down a DMA channel's hardware, aborting any
  150. * I/O that may be in progress, potentially leaving I/O hardware in an undefined
  151. * state. It is unwise to call this function if there is ANY chance the hardware
  152. * is still processing a command.
  153. */
  154. static int mxs_dma_disable(int channel)
  155. {
  156. struct mxs_dma_chan *pchan;
  157. struct mxs_apbh_regs *apbh_regs =
  158. (struct mxs_apbh_regs *)MXS_APBH_BASE;
  159. int ret;
  160. ret = mxs_dma_validate_chan(channel);
  161. if (ret)
  162. return ret;
  163. pchan = mxs_dma_channels + channel;
  164. if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
  165. return -EINVAL;
  166. writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
  167. &apbh_regs->hw_apbh_ctrl0_set);
  168. pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
  169. pchan->active_num = 0;
  170. pchan->pending_num = 0;
  171. list_splice_init(&pchan->active, &pchan->done);
  172. return 0;
  173. }
/*
 * Reset the DMA channel hardware.
 *
 * The per-channel reset bit lives in a different register on i.MX23
 * (CTRL0) than on i.MX28/i.MX6/i.MX7 (CHANNEL_CTRL), hence the
 * compile-time selection of register address and bit offset below.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif (defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7))
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}
  195. /*
  196. * Enable or disable DMA interrupt.
  197. *
  198. * This function enables the given DMA channel to interrupt the CPU.
  199. */
  200. static int mxs_dma_enable_irq(int channel, int enable)
  201. {
  202. struct mxs_apbh_regs *apbh_regs =
  203. (struct mxs_apbh_regs *)MXS_APBH_BASE;
  204. int ret;
  205. ret = mxs_dma_validate_chan(channel);
  206. if (ret)
  207. return ret;
  208. if (enable)
  209. writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
  210. &apbh_regs->hw_apbh_ctrl1_set);
  211. else
  212. writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
  213. &apbh_regs->hw_apbh_ctrl1_clr);
  214. return 0;
  215. }
  216. /*
  217. * Clear DMA interrupt.
  218. *
  219. * The software that is using the DMA channel must register to receive its
  220. * interrupts and, when they arrive, must call this function to clear them.
  221. */
  222. static int mxs_dma_ack_irq(int channel)
  223. {
  224. struct mxs_apbh_regs *apbh_regs =
  225. (struct mxs_apbh_regs *)MXS_APBH_BASE;
  226. int ret;
  227. ret = mxs_dma_validate_chan(channel);
  228. if (ret)
  229. return ret;
  230. writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
  231. writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);
  232. return 0;
  233. }
  234. /*
  235. * Request to reserve a DMA channel
  236. */
  237. static int mxs_dma_request(int channel)
  238. {
  239. struct mxs_dma_chan *pchan;
  240. if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
  241. return -EINVAL;
  242. pchan = mxs_dma_channels + channel;
  243. if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
  244. return -ENODEV;
  245. if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
  246. return -EBUSY;
  247. pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
  248. pchan->active_num = 0;
  249. pchan->pending_num = 0;
  250. INIT_LIST_HEAD(&pchan->active);
  251. INIT_LIST_HEAD(&pchan->done);
  252. return 0;
  253. }
  254. /*
  255. * Release a DMA channel.
  256. *
  257. * This function releases a DMA channel from its current owner.
  258. *
  259. * The channel will NOT be released if it's marked "busy" (see
  260. * mxs_dma_enable()).
  261. */
  262. int mxs_dma_release(int channel)
  263. {
  264. struct mxs_dma_chan *pchan;
  265. int ret;
  266. ret = mxs_dma_validate_chan(channel);
  267. if (ret)
  268. return ret;
  269. pchan = mxs_dma_channels + channel;
  270. if (pchan->flags & MXS_DMA_FLAGS_BUSY)
  271. return -EBUSY;
  272. pchan->dev = 0;
  273. pchan->active_num = 0;
  274. pchan->pending_num = 0;
  275. pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;
  276. return 0;
  277. }
  278. /*
  279. * Allocate DMA descriptor
  280. */
  281. struct mxs_dma_desc *mxs_dma_desc_alloc(void)
  282. {
  283. struct mxs_dma_desc *pdesc;
  284. uint32_t size;
  285. size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
  286. pdesc = memalign(MXS_DMA_ALIGNMENT, size);
  287. if (pdesc == NULL)
  288. return NULL;
  289. memset(pdesc, 0, sizeof(*pdesc));
  290. pdesc->address = (dma_addr_t)pdesc;
  291. return pdesc;
  292. };
  293. /*
  294. * Free DMA descriptor
  295. */
  296. void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
  297. {
  298. if (pdesc == NULL)
  299. return;
  300. free(pdesc);
  301. }
/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *   The DMA channel's hardware semaphore must reflect the number of DMA
 *   commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *   A DMA channel begins processing commands when its hardware semaphore is
 *   written with a value greater than zero, and it stops processing commands
 *   when the semaphore returns to zero.
 *
 *   When a channel finishes a DMA command, it will decrement its semaphore if
 *   the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *   In principle, it's not necessary for the DECREMENT_SEMAPHORE to be set,
 *   unless it suits the purposes of the software. For example, one could
 *   construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *   bit set only in the last one. Then, setting the DMA channel's hardware
 *   semaphore to one would cause the entire series of five commands to be
 *   processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *   ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the DMA
 *   channel's hardware semaphore will be decremented EVERY time a command is
 *   processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Start as a standalone command: next points back at itself. */
	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		/* Chain the previous tail descriptor onto the new one. */
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		/* The old tail changed: push it out for the hardware. */
		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	/* Only chain heads count toward pending_num. */
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* sem = number of commands the hardware still has to finish. */
	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	/* Hardware still owns everything we queued: nothing to retire. */
	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		/* Stop once only the hardware-owned descriptors remain. */
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		/*
		 * Only LAST descriptors count against active_num, matching
		 * how mxs_dma_desc_append() counts only FIRST descriptors.
		 */
		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}
/*
 * Wait for a DMA channel to complete.
 *
 * Polls the channel's completion bit in CTRL1 for up to @timeout
 * (units as defined by mxs_wait_mask_set() — presumably microseconds,
 * confirm against its implementation). On timeout the channel is reset
 * and -ETIMEDOUT is returned; otherwise 0, or a negative error for an
 * invalid channel.
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
				1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		/* Hardware never signalled: force the channel back to sane. */
		mxs_dma_reset(chan);
	}

	return ret;
}
/*
 * Execute the DMA channel: run all queued descriptors synchronously.
 *
 * Enables the channel, busy-waits for completion, retires the processed
 * descriptors onto a throwaway local list, and shuts the channel down
 * again (ack IRQ, reset, IRQ off, disable).
 *
 * Returns 0 on success or a negative error code (e.g. -ETIMEDOUT when
 * the hardware never finished within the timeout).
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
/*
 * Execute a continuously running circular DMA descriptor.
 * NOTE: This is not intended for general use, but rather
 *	 for the LCD driver in Smart-LCD mode. It allows
 *	 continuous triggering of the RUN bit there.
 *
 * NOTE(review): unlike the other entry points, @chan is not validated
 * here — the caller must pass a requested, in-range channel.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	/* Push the descriptor out of the cache before hardware reads it. */
	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	/* Point the channel at the descriptor and hand it one command. */
	writel(mxs_dma_cmd_address(pdesc),
	       &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	/* Ungate the channel clock to start it running. */
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_clr);
}
/*
 * Initialize the DMA hardware block.
 *
 * Soft-resets the APBH block, then sets or clears the AHB burst-8 and
 * APB burst enables according to the board configuration.
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif
}
  491. int mxs_dma_init_channel(int channel)
  492. {
  493. struct mxs_dma_chan *pchan;
  494. int ret;
  495. pchan = mxs_dma_channels + channel;
  496. pchan->flags = MXS_DMA_FLAGS_VALID;
  497. ret = mxs_dma_request(channel);
  498. if (ret) {
  499. printf("MXS DMA: Can't acquire DMA channel %i\n",
  500. channel);
  501. return ret;
  502. }
  503. mxs_dma_reset(channel);
  504. mxs_dma_ack_irq(channel);
  505. return 0;
  506. }