dw_mmc.c

/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <bouncebuf.h>
#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>

#define PAGE_SIZE 4096
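
/*
 * Write a reset request into the CTRL register and poll until the
 * controller clears the reset bits. Returns 1 on success, 0 on timeout
 * (note: not the usual 0/-errno convention).
 */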
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}
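
/*
 * Fill one internal-DMA (IDMAC) descriptor: flags, byte count, buffer
 * address, and a link to the physically following descriptor.
 */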
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
		u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}
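
/*
 * Build a chained IDMAC descriptor list over the bounce buffer (up to
 * eight blocks per descriptor), flush it to memory, enable internal DMA
 * in CTRL/BMOD, and program the block size and total byte count.
 */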
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
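
/*
 * Wait for the data phase to complete. In FIFO (PIO) mode the CPU moves
 * words through the DATA register on RXDR/TXDR status; in DMA mode this
 * only polls for DTO, data errors or a timeout while the IDMAC does the
 * actual transfer.
 */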
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	u32 timeout = 240000;
	u32 mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks / 4;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						DWMCI_FIFO_SHIFT) &
						DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n", __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
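
/* Translate the mmc_data direction into DWMCI_CMD transfer-mode flags. */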
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}
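
/*
 * Send a command and, optionally, run a data transfer: wait for the
 * card to leave the busy state, set up either FIFO or IDMAC/bounce-buffer
 * transfer, issue the command, then collect the response and the data.
 */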
#ifdef CONFIG_DM_MMC_OPS
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate,
						    (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate,
						    (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this cleanup. */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
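
/*
 * Program the card clock: derive the divider from the source clock
 * (get_mmc_clk callback or host->bus_hz), then follow the usual
 * disable -> set divider -> re-enable sequence, waiting for each
 * clock-update command to be accepted by the controller.
 */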
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is source clock value.
	 * host->bus_hz should be set by user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}
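
/*
 * Apply the bus settings requested by the MMC core: clock frequency,
 * bus width (1/4/8 bit) and DDR mode, plus an optional board clksel hook.
 */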
#ifdef CONFIG_DM_MMC_OPS
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

	return 0;
}
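
/*
 * One-time controller setup: power on, reset, switch to the minimum
 * (enumeration) clock, mask interrupts, and program FIFO watermarks,
 * deriving a default fifoth_val from the reset value of FIFOTH when the
 * board did not provide one.
 */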
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}
#ifdef CONFIG_DM_MMC_OPS
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif
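
/*
 * Fill an mmc_config for this host: ops (non-DM builds), clock limits,
 * supported voltages, bus-width capabilities and maximum block count.
 */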
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC_OPS
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}
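
/*
 * Registration entry points: with CONFIG_BLK the host is bound through
 * the driver-model MMC layer, otherwise add_dwmci() creates the mmc
 * device from the legacy host config.
 */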
#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif