at_hdmac.c
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac         : Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma  : Atmel DMA controller entity related
 * atc_ / atchan    : Atmel DMA Channel entity related
 */

#define ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/

static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
						size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}
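
/*
 * Illustrative only (not part of the original driver): a worked example of
 * the width selection above. The returned value is the log2 of the transfer
 * unit in bytes, matching the ATC_SRC_WIDTH()/ATC_DST_WIDTH() encodings:
 *
 *	atc_get_xfer_width(0x1000, 0x2000, 64) == 2	word transfers; src,
 *		dst and len all have their two low bits clear
 *	atc_get_xfer_width(0x1002, 0x2006, 6)  == 1	halfword transfers;
 *		everything is 2-byte aligned but not 4-byte aligned
 *	atc_get_xfer_width(0x1001, 0x2000, 64) == 0	byte transfers; the
 *		source address alone forces the smallest width
 */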

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to the client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc *desc = NULL;
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
			"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		 "scanned %u descriptors on freelist\n", i);

	/* no more descriptors available in the initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
				"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
			      &(*first)->tx_list);
	}
	*prev = desc;
}
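
/*
 * Illustrative only: how the prep_* functions below typically drive
 * atc_desc_chain(). A hypothetical two-part transfer would do roughly:
 *
 *	struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *	desc = atc_desc_get(atchan);		// fill desc->lli ...
 *	atc_desc_chain(&first, &prev, desc);	// first == desc
 *	desc = atc_desc_get(atchan);		// fill desc->lli ...
 *	atc_desc_chain(&first, &prev, desc);	// links prev->lli.dscr
 *	set_desc_eol(desc);			// terminate the HW list
 *
 * so "first" heads both the software tx_list and the hardware LLI chain.
 */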

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 */
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
						dma_cookie_t cookie)
{
	struct at_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	return NULL;
}

/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

	/*
	 * According to the datasheet, when reading the Control A Register
	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
	 * number of transfers completed on the Source Interface.
	 * So btsize is always a number of source width transfers.
	 */
	return current_len - (btsize << src_width);
}
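
/*
 * Illustrative only: a worked example of the residue update above. Assume
 * current_len = 4096 bytes and CTRLA reports btsize = 512 with a source
 * width encoding of 2 (words). The controller has completed 512 word
 * transfers, i.e. 512 << 2 = 2048 bytes, so 4096 - 2048 = 2048 bytes are
 * still left to transfer.
 */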

/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc;
	int ret;
	u32 ctrla, dscr, trials;

	/*
	 * If the cookie doesn't match the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * To get the current child descriptor we can use the value of
		 * the channel's DSCR register and compare it against the value
		 * of the hardware linked list structure of each child
		 * descriptor.
		 *
		 * The CTRLA register provides us with the amount of data
		 * already read from the source for the current child
		 * descriptor. So we can compute a more accurate residue by
		 * also removing the number of bytes corresponding to this
		 * amount of data.
		 *
		 * However, the DSCR and CTRLA registers cannot both be read
		 * atomically. Hence a race condition may occur: the first
		 * read register may refer to one child descriptor whereas the
		 * second read may refer to a later child descriptor in the
		 * list because of the DMA transfer progression in between the
		 * two reads.
		 *
		 * One solution could have been to pause the DMA transfer,
		 * read the DSCR and CTRLA then resume the DMA transfer.
		 * Nonetheless, this approach presents some drawbacks:
		 * - If the DMA transfer is paused, RX overruns or TX underruns
		 *   are more likely to occur depending on the system latency.
		 *   Taking the USART driver as an example, it uses a cyclic
		 *   DMA transfer to read data from the Receive Holding
		 *   Register (RHR) to avoid RX overruns since the RHR is not
		 *   protected by any FIFO on most Atmel SoCs. So pausing the
		 *   DMA transfer to compute the residue would break the USART
		 *   driver design.
		 * - The atc_pause() function masks interrupts, which we'd
		 *   rather avoid for system latency reasons.
		 *
		 * So we'd rather use another solution: the DSCR is read a
		 * first time, the CTRLA is read in turn, then the DSCR is
		 * read a second time. If the two consecutive DSCR values are
		 * the same, we assume both refer to the very same child
		 * descriptor, as does the CTRLA value read in between. For
		 * cyclic transfers, the assumption is that a full loop is
		 * "not so fast".
		 * If the two DSCR values differ, we read the CTRLA again and
		 * then the DSCR, until two consecutive DSCR values are equal
		 * or the maximum number of trials is reached.
		 * This algorithm is very unlikely not to find a stable value
		 * for DSCR.
		 */
		dscr = channel_readl(atchan, DSCR);
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
			u32 new_dscr;

			rmb(); /* ensure DSCR is read after CTRLA */
			new_dscr = channel_readl(atchan, DSCR);

			/*
			 * If the DSCR register value has not changed inside
			 * the DMA controller since the previous read, we
			 * assume that both the dscr and ctrla values refer
			 * to the very same descriptor.
			 */
			if (likely(new_dscr == dscr))
				break;

			/*
			 * DSCR has changed inside the DMA controller, so the
			 * previously read value of CTRLA may refer to an
			 * already processed descriptor, hence could be
			 * outdated. We need to update ctrla to match the
			 * current descriptor.
			 */
			dscr = new_dscr;
			rmb(); /* ensure DSCR is read before CTRLA */
			ctrla = channel_readl(atchan, CTRLA);
		}
		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
			return -ETIMEDOUT;

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla);

		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the current descriptor in the chain we can calculate
		 * the remaining bytes using the channel's register.
		 */
		ret = atc_calc_bytes_left(ret, ctrla);
	} else {
		/* single transfer */
		ctrla = channel_readl(atchan, CTRLA);
		ret = atc_calc_bytes_left(ret, ctrla);
	}

	return ret;
}
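
/*
 * Illustrative only: a worked trace of the chain walk above. Suppose a
 * transfer was split into three linked descriptors of 16 KiB each
 * (total_len = 48 KiB) and the stabilized DSCR matches the first child's
 * lli.dscr, i.e. that child is the descriptor currently in flight. The code
 * subtracts the head descriptor's 16 KiB, breaks on the matching child, and
 * atc_calc_bytes_left() then subtracts the bytes CTRLA reports as already
 * read for it: with btsize = 1024 word transfers, the residue is
 * 49152 - 16384 - (1024 << 2) = 28672 bytes.
 */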

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submits queued descriptors, if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
		 "Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
		 "  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc *first = atc_first_active(atchan);
	struct dma_async_tx_descriptor *txd = &first->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "new cyclic period llp 0x%08x\n",
		 channel_readl(atchan, DSCR));

	dmaengine_desc_get_callback_invoke(txd, NULL);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = (struct at_dma *)dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc *desc = txd_to_at_desc(tx);
	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
			 desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
			 desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct data_chunk *first = xt->sgl;
	struct at_desc *desc = NULL;
	size_t xfer_count;
	unsigned int dwidth;
	u32 ctrla;
	u32 ctrlb;
	size_t len = 0;
	int i;

	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		 __func__, &xt->src_start, &xt->dst_start, xt->numf,
		 xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fits that
	 * description, i.e. a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start,
				    xt->dst_start, len);

	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = ATC_SRC_WIDTH(dwidth) |
		ATC_DST_WIDTH(dwidth);

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_SRC_PIP
		| ATC_DST_PIP
		| ATC_FC_MEM2MEM;

	/* create the transfer */
	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan),
			"%s: couldn't allocate our descriptor\n", __func__);
		return NULL;
	}

	desc->lli.saddr = xt->src_start;
	desc->lli.daddr = xt->dst_start;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	desc->txd.cookie = -EBUSY;
	desc->total_len = desc->len = len;

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &desc->txd;
}
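
/*
 * Illustrative only (not part of the original driver): a minimal client-side
 * sketch of reaching this callback through the generic dmaengine API. The
 * channel, addresses and sizes are hypothetical; the template must use
 * numf == 1 and identical chunks, as enforced above.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->src_start = src_phys;
 *	xt->dst_start = dst_phys;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->numf = 1;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = 512;
 *	xt->sgl[0].icg = 64;
 *	xt->src_inc = xt->dst_inc = true;
 *	xt->src_sgl = xt->dst_sgl = true;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	if (tx)
 *		dmaengine_submit(tx);
 */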

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
		 &dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				   ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = xfer_count << src_width;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = len;

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
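
/*
 * Illustrative only: a minimal client-side sketch of the memcpy path above,
 * using hypothetical DMA bus addresses (e.g. obtained from dma_map_single()).
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */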

static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
					      dma_addr_t psrc,
					      dma_addr_t pdst,
					      size_t len)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc;
	size_t xfer_count;

	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		    ATC_SRC_ADDR_MODE_FIXED |
		    ATC_DST_ADDR_MODE_INCR |
		    ATC_FC_MEM2MEM;

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n",
			__func__);
		return NULL;
	}

	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
			__func__);
		return NULL;
	}

	desc->lli.saddr = psrc;
	desc->lli.daddr = pdst;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->txd.cookie = 0;
	desc->len = len;

	return desc;
}

/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	void __iomem *vaddr;
	dma_addr_t paddr;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		 &dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	desc = atc_create_memset_desc(chan, paddr, dest, len);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
			__func__);
		goto err_free_buffer;
	}

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->txd.cookie = -EBUSY;
	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;

err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}

static struct dma_async_tx_descriptor *
atc_prep_dma_memset_sg(struct dma_chan *chan,
		       struct scatterlist *sgl,
		       unsigned int sg_len, int value,
		       unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	void __iomem *vaddr;
	dma_addr_t paddr;
	size_t total_len = 0;
	int i;

	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
		 value, sg_len, flags);

	if (unlikely(!sgl || !sg_len)) {
		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
				__func__);
			goto err_put_desc;
		}

		desc = atc_create_memset_desc(chan, paddr, dest, len);
		if (!desc)
			goto err_put_desc;

		atc_desc_chain(&first, &prev, desc);

		total_len += len;
	}

	/*
	 * Only set the buffer pointers on the last descriptor to
	 * avoid freeing while we have our transfer still going
	 */
	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_put_desc:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
		 sg_len,
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |= ATC_DST_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |= ATC_SRC_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
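
/*
 * Illustrative only: a minimal client-side sketch for the slave path above,
 * assuming a hypothetical peripheral FIFO at fifo_phys and an already
 * mapped scatterlist. The slave configuration must be set first so that
 * ctrla/ctrlb above pick up the right widths and bursts.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */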

/**
 * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
 * @chan: the channel to prepare operation on
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned int src_width;
	unsigned int dst_width;
	size_t xfer_count;
	u32 ctrla;
	u32 ctrlb;
	size_t dst_len = 0, src_len = 0;
	dma_addr_t dst = 0, src = 0;
	size_t len = 0, total_len = 0;

	if (unlikely(dst_nents == 0 || src_nents == 0))
		return NULL;

	if (unlikely(dst_sg == NULL || src_sg == NULL))
		return NULL;

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {

		/* prepare the next transfer */
		if (dst_len == 0) {

			/* no more destination scatterlist entries */
			if (!dst_sg || !dst_nents)
				break;

			dst = sg_dma_address(dst_sg);
			dst_len = sg_dma_len(dst_sg);

			dst_sg = sg_next(dst_sg);
			dst_nents--;
		}

		if (src_len == 0) {

			/* no more source scatterlist entries */
			if (!src_sg || !src_nents)
				break;

			src = sg_dma_address(src_sg);
			src_len = sg_dma_len(src_sg);

			src_sg = sg_next(src_sg);
			src_nents--;
		}

		len = min_t(size_t, src_len, dst_len);
		if (len == 0)
			continue;

		/* take care for the alignment */
		src_width = dst_width = atc_get_xfer_width(src, dst, len);

		ctrla = ATC_SRC_WIDTH(src_width) |
			ATC_DST_WIDTH(dst_width);

		/*
		 * The number of transfers to set up refers to the source
		 * width, which depends on the alignment.
		 */
		xfer_count = len >> src_width;
		if (xfer_count > ATC_BTSIZE_MAX) {
			xfer_count = ATC_BTSIZE_MAX;
			len = ATC_BTSIZE_MAX << src_width;
		}

		/* create the transfer */
		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src;
		desc->lli.daddr = dst;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = len;

		atc_desc_chain(&first, &prev, desc);

		/* update the lengths and addresses for the next loop cycle */
		dst_len -= len;
		src_len -= len;
		dst += len;
		src += len;

		total_len += len;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
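
/*
 * Illustrative only: with reg_width = 2 (word transfers), the checks above
 * accept a period of at most ATC_BTSIZE_MAX << 2 bytes and require both
 * period_len and buf_addr to be word aligned. For example, buf_addr =
 * 0x20001000 with period_len = 4096 passes, while period_len = 4095 or
 * buf_addr = 0x20001002 fails with -EINVAL.
 */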

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	u32 ctrla;

	/* prepare common CTRLA value */
	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 &buf_addr,
		 periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc *desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
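
/*
 * Illustrative only: a minimal client-side sketch of the cyclic path above,
 * in the style of audio or UART RX drivers. Names are hypothetical; the
 * buffer must divide into equal, properly aligned periods.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * 4096, 4096,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_period_elapsed;	// runs once per period
 *		tx->callback_param = priv;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */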
  1262. static int atc_config(struct dma_chan *chan,
  1263. struct dma_slave_config *sconfig)
  1264. {
  1265. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  1266. dev_vdbg(chan2dev(chan), "%s\n", __func__);
  1267. /* Check if it is chan is configured for slave transfers */
  1268. if (!chan->private)
  1269. return -EINVAL;
  1270. memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
  1271. convert_burst(&atchan->dma_sconfig.src_maxburst);
  1272. convert_burst(&atchan->dma_sconfig.dst_maxburst);
  1273. return 0;
  1274. }
  1275. static int atc_pause(struct dma_chan *chan)
  1276. {
  1277. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  1278. struct at_dma *atdma = to_at_dma(chan->device);
  1279. int chan_id = atchan->chan_common.chan_id;
  1280. unsigned long flags;
  1281. LIST_HEAD(list);
  1282. dev_vdbg(chan2dev(chan), "%s\n", __func__);
  1283. spin_lock_irqsave(&atchan->lock, flags);
  1284. dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
  1285. set_bit(ATC_IS_PAUSED, &atchan->status);
  1286. spin_unlock_irqrestore(&atchan->lock, flags);
  1287. return 0;
  1288. }
static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
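/**
 * atc_terminate_all - abort all transfers on a channel
 * @chan: DMA channel to terminate
 *
 * Disables the channel, polls until it has actually stopped, then
 * completes every active and queued descriptor and clears the
 * paused/cyclic status bits.
 */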
static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	struct at_desc		*desc, *_desc;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;
	enum dma_status		ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/* Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Return: the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;

	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/*
	 * Have we already been set up? Reconfigure the channel but do
	 * not reallocate descriptors.
	 */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
#ifdef CONFIG_OF
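/*
 * Filter callback for dma_request_channel(): accept only channels
 * belonging to the DMA controller named in the slave data, and attach
 * that data to the channel on a match.
 */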
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}
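/**
 * at_dma_xlate - translate a device-tree DMA specifier into a channel
 * @dma_spec: phandle arguments from the consumer node
 * @of_dma: controller registration data
 *
 * Builds an at_dma_slave from the two specifier cells (interface ids
 * and peripheral/FIFO configuration) and requests a matching channel.
 */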
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);
	/* bail out if the DT node did not resolve to a platform device */
	if (!dmac_pdev)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};
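/*
 * Pick the platform data matching either the DT compatible string or,
 * failing that, the platform device id table entry.
 */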
static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
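/**
 * at_dma_probe - initialize the controller and register it with dmaengine
 * @pdev: platform device describing the DMA controller
 *
 * Maps the registers, enables the clock, requests the interrupt,
 * allocates the descriptor and memset pools, initializes every channel
 * and advertises the capabilities supported by the SoC variant.
 */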
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* create a pool of consistent memory blocks for memset blocks */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
			&pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
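/**
 * at_dma_remove - tear down the controller
 * @pdev: platform device being removed
 *
 * Reverses everything done in at_dma_probe(): unregisters from
 * dmaengine, disables per-channel interrupts, kills tasklets and
 * releases the pools, IRQ, clock and MMIO resources.
 */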
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}
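/* Quiesce the controller and gate its clock on system shutdown */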
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
}
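/* Refuse to enter suspend while a non-cyclic transfer is still in flight */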
static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
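/* Save the state a cyclic channel needs in order to restart after resume */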
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/*
	 * The channel should have been paused by its user already;
	 * pause it here anyway if that has not been done.
	 */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}
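/* Save per-channel and controller state, then power the DMA controller down */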
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}
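/* Reprogram a cyclic channel from its saved next-descriptor pointer */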
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/*
	 * Restore channel status for the cyclic descriptor list:
	 * next descriptor in the list at the time of suspend.
	 */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/*
	 * The channel pause status should be removed by the channel user;
	 * we cannot take the initiative to do it here.
	 */
	vdbg_dump_regs(atchan);
}
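/* Re-enable the controller and restore the state saved at suspend time */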
static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");