bcm-sf2-eth-gmac.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967
  1. /*
  2. * Copyright 2014 Broadcom Corporation.
  3. *
  4. * SPDX-License-Identifier: GPL-2.0+
  5. */
  6. #ifdef BCM_GMAC_DEBUG
  7. #ifndef DEBUG
  8. #define DEBUG
  9. #endif
  10. #endif
  11. #include <config.h>
  12. #include <common.h>
  13. #include <malloc.h>
  14. #include <net.h>
  15. #include <asm/io.h>
  16. #include <phy.h>
  17. #include "bcm-sf2-eth.h"
  18. #include "bcm-sf2-eth-gmac.h"
  19. #define SPINWAIT(exp, us) { \
  20. uint countdown = (us) + 9; \
  21. while ((exp) && (countdown >= 10)) {\
  22. udelay(10); \
  23. countdown -= 10; \
  24. } \
  25. }
  26. static int gmac_disable_dma(struct eth_dma *dma, int dir);
  27. static int gmac_enable_dma(struct eth_dma *dma, int dir);
/* DMA Descriptor, as laid out in memory for the 64-bit GMAC DMA engine */
typedef struct {
	/* misc control bits */
	uint32_t ctrl1;
	/* buffer count and address extension */
	uint32_t ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t addrhigh;
} dma64dd_t;

/*
 * Cached copy of the DMA control flags (DMA_CTRL_ROC / DMA_CTRL_PEN);
 * maintained by dma_ctrlflags() and consulted when enabling the DMA
 * channels.  Single GMAC instance, so a file-scope variable suffices.
 */
uint32_t g_dmactrlflags;
/*
 * Update the cached DMA control flags.
 *
 * @mask:  bits of g_dmactrlflags to clear first
 * @flags: bits to set afterwards
 *
 * When parity (DMA_CTRL_PEN) is being enabled, the TX control register is
 * probed to see whether the parity-disable bit sticks; if it does not,
 * parity is unsupported on this core and the flag is dropped again.
 *
 * Return: the resulting flag word.
 */
static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
	debug("%s enter\n", __func__);

	g_dmactrlflags &= ~mask;
	g_dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (g_dmactrlflags & DMA_CTRL_PEN) {
		uint32_t control;

		control = readl(GMAC0_DMA_TX_CTRL_ADDR);
		writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
		if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
			/*
			 * We *can* disable it, therefore it is supported;
			 * restore control register
			 */
			writel(control, GMAC0_DMA_TX_CTRL_ADDR);
		} else {
			/* Not supported, don't allow it to be enabled */
			g_dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	return g_dmactrlflags;
}
  63. static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
  64. {
  65. uint32_t v = readl(reg);
  66. v &= ~(value);
  67. writel(v, reg);
  68. }
  69. static inline void reg32_set_bits(uint32_t reg, uint32_t value)
  70. {
  71. uint32_t v = readl(reg);
  72. v |= value;
  73. writel(v, reg);
  74. }
  75. #ifdef BCM_GMAC_DEBUG
/* Debug helper: dump the TX DMA registers, descriptor ring and buffers */
static void dma_tx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("TX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_TX_CTRL_ADDR),
	       readl(GMAC0_DMA_TX_PTR_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_TX_STATUS0_ADDR),
	       readl(GMAC0_DMA_TX_STATUS1_ADDR));

	printf("TX Descriptors:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("TX Buffers:\n");
	/* print the address of each TX packet buffer */
	for (i = 0; i < TX_BUF_NUM; i++) {
		bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE);
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
/* Debug helper: dump the RX DMA registers, descriptor ring and buffers */
static void dma_rx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("RX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_RX_CTRL_ADDR),
	       readl(GMAC0_DMA_RX_PTR_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_RX_STATUS0_ADDR),
	       readl(GMAC0_DMA_RX_STATUS1_ADDR));

	printf("RX Descriptors:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("RX Buffers:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		bufp = dma->rx_buf + i * RX_BUF_SIZE;
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
  131. #endif
  132. static int dma_tx_init(struct eth_dma *dma)
  133. {
  134. dma64dd_t *descp = NULL;
  135. uint8_t *bufp;
  136. int i;
  137. uint32_t ctrl;
  138. debug("%s enter\n", __func__);
  139. /* clear descriptor memory */
  140. memset((void *)(dma->tx_desc_aligned), 0,
  141. TX_BUF_NUM * sizeof(dma64dd_t));
  142. memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE);
  143. /* Initialize TX DMA descriptor table */
  144. for (i = 0; i < TX_BUF_NUM; i++) {
  145. descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
  146. bufp = dma->tx_buf + i * TX_BUF_SIZE;
  147. /* clear buffer memory */
  148. memset((void *)bufp, 0, TX_BUF_SIZE);
  149. ctrl = 0;
  150. /* if last descr set endOfTable */
  151. if (i == (TX_BUF_NUM-1))
  152. ctrl = D64_CTRL1_EOT;
  153. descp->ctrl1 = ctrl;
  154. descp->ctrl2 = 0;
  155. descp->addrlow = (uint32_t)bufp;
  156. descp->addrhigh = 0;
  157. }
  158. /* flush descriptor and buffer */
  159. descp = dma->tx_desc_aligned;
  160. bufp = dma->tx_buf;
  161. flush_dcache_range((unsigned long)descp,
  162. (unsigned long)(descp +
  163. sizeof(dma64dd_t) * TX_BUF_NUM));
  164. flush_dcache_range((unsigned long)(bufp),
  165. (unsigned long)(bufp + TX_BUF_SIZE * TX_BUF_NUM));
  166. /* initialize the DMA channel */
  167. writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
  168. writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
  169. /* now update the dma last descriptor */
  170. writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
  171. GMAC0_DMA_TX_PTR_ADDR);
  172. return 0;
  173. }
  174. static int dma_rx_init(struct eth_dma *dma)
  175. {
  176. uint32_t last_desc;
  177. dma64dd_t *descp = NULL;
  178. uint8_t *bufp;
  179. uint32_t ctrl;
  180. int i;
  181. debug("%s enter\n", __func__);
  182. /* clear descriptor memory */
  183. memset((void *)(dma->rx_desc_aligned), 0,
  184. RX_BUF_NUM * sizeof(dma64dd_t));
  185. /* clear buffer memory */
  186. memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE);
  187. /* Initialize RX DMA descriptor table */
  188. for (i = 0; i < RX_BUF_NUM; i++) {
  189. descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
  190. bufp = dma->rx_buf + i * RX_BUF_SIZE;
  191. ctrl = 0;
  192. /* if last descr set endOfTable */
  193. if (i == (RX_BUF_NUM - 1))
  194. ctrl = D64_CTRL1_EOT;
  195. descp->ctrl1 = ctrl;
  196. descp->ctrl2 = RX_BUF_SIZE;
  197. descp->addrlow = (uint32_t)bufp;
  198. descp->addrhigh = 0;
  199. last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
  200. + sizeof(dma64dd_t);
  201. }
  202. descp = dma->rx_desc_aligned;
  203. bufp = dma->rx_buf;
  204. /* flush descriptor and buffer */
  205. flush_dcache_range((unsigned long)descp,
  206. (unsigned long)(descp +
  207. sizeof(dma64dd_t) * RX_BUF_NUM));
  208. flush_dcache_range((unsigned long)(bufp),
  209. (unsigned long)(bufp + RX_BUF_SIZE * RX_BUF_NUM));
  210. /* initailize the DMA channel */
  211. writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
  212. writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);
  213. /* now update the dma last descriptor */
  214. writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);
  215. return 0;
  216. }
/*
 * One-time DMA setup: reset the control flags, build the TX and RX
 * rings, then enable Rx Overflow Continue (parity stays disabled).
 *
 * Return: 0 on success.
 */
static int dma_init(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	/*
	 * Default flags: For backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED.
	 */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	debug("rx burst len 0x%x\n",
	      (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
	      >> D64_RC_BL_SHIFT);
	debug("tx burst len 0x%x\n",
	      (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
	      >> D64_XC_BL_SHIFT);

	dma_tx_init(dma);
	dma_rx_init(dma);

	/* From end of chip_init() */
	/* enable the overflow continue feature and disable parity */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
		      DMA_CTRL_ROC /* value */);

	return 0;
}
/*
 * Tear down both DMA channels and free every buffer allocated by
 * gmac_add().  The hardware is stopped before its memory is released,
 * and pointers are NULLed so a repeated call is harmless.
 *
 * Return: 0 on success.
 */
static int dma_deinit(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	gmac_disable_dma(dma, MAC_DMA_RX);
	gmac_disable_dma(dma, MAC_DMA_TX);

	free(dma->tx_buf);
	dma->tx_buf = NULL;
	free(dma->tx_desc);
	dma->tx_desc = NULL;
	dma->tx_desc_aligned = NULL;

	free(dma->rx_buf);
	dma->rx_buf = NULL;
	free(dma->rx_desc);
	dma->rx_desc = NULL;
	dma->rx_desc_aligned = NULL;

	return 0;
}
  256. int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
  257. {
  258. uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE;
  259. /* kick off the dma */
  260. size_t len = length;
  261. int txout = dma->cur_tx_index;
  262. uint32_t flags;
  263. dma64dd_t *descp = NULL;
  264. uint32_t ctrl;
  265. uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
  266. sizeof(dma64dd_t)) & D64_XP_LD_MASK;
  267. size_t buflen;
  268. debug("%s enter\n", __func__);
  269. /* load the buffer */
  270. memcpy(bufp, packet, len);
  271. /* Add 4 bytes for Ethernet FCS/CRC */
  272. buflen = len + 4;
  273. ctrl = (buflen & D64_CTRL2_BC_MASK);
  274. /* the transmit will only be one frame or set SOF, EOF */
  275. /* also set int on completion */
  276. flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
  277. /* txout points to the descriptor to uset */
  278. /* if last descriptor then set EOT */
  279. if (txout == (TX_BUF_NUM - 1)) {
  280. flags |= D64_CTRL1_EOT;
  281. last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
  282. }
  283. /* write the descriptor */
  284. descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
  285. descp->addrlow = (uint32_t)bufp;
  286. descp->addrhigh = 0;
  287. descp->ctrl1 = flags;
  288. descp->ctrl2 = ctrl;
  289. /* flush descriptor and buffer */
  290. flush_dcache_range((unsigned long)descp,
  291. (unsigned long)(descp + sizeof(dma64dd_t)));
  292. flush_dcache_range((unsigned long)bufp,
  293. (unsigned long)(bufp + TX_BUF_SIZE));
  294. /* now update the dma last descriptor */
  295. writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);
  296. /* tx dma should be enabled so packet should go out */
  297. /* update txout */
  298. dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);
  299. return 0;
  300. }
  301. bool gmac_check_tx_done(struct eth_dma *dma)
  302. {
  303. /* wait for tx to complete */
  304. uint32_t intstatus;
  305. bool xfrdone = false;
  306. debug("%s enter\n", __func__);
  307. intstatus = readl(GMAC0_INT_STATUS_ADDR);
  308. debug("int(0x%x)\n", intstatus);
  309. if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
  310. xfrdone = true;
  311. /* clear the int bits */
  312. intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
  313. writel(intstatus, GMAC0_INT_STATUS_ADDR);
  314. } else {
  315. debug("Tx int(0x%x)\n", intstatus);
  316. }
  317. return xfrdone;
  318. }
  319. int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
  320. {
  321. void *bufp, *datap;
  322. size_t rcvlen = 0, buflen = 0;
  323. uint32_t stat0 = 0, stat1 = 0;
  324. uint32_t control, offset;
  325. uint8_t statbuf[HWRXOFF*2];
  326. int index, curr, active;
  327. dma64dd_t *descp = NULL;
  328. /* udelay(50); */
  329. /*
  330. * this api will check if a packet has been received.
  331. * If so it will return the address of the buffer and current
  332. * descriptor index will be incremented to the
  333. * next descriptor. Once done with the frame the buffer should be
  334. * added back onto the descriptor and the lastdscr should be updated
  335. * to this descriptor.
  336. */
  337. index = dma->cur_rx_index;
  338. offset = (uint32_t)(dma->rx_desc_aligned);
  339. stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
  340. stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
  341. curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
  342. active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
  343. /* check if any frame */
  344. if (index == curr)
  345. return -1;
  346. debug("received packet\n");
  347. debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
  348. /* remove warning */
  349. if (index == active)
  350. ;
  351. /* get the packet pointer that corresponds to the rx descriptor */
  352. bufp = dma->rx_buf + index * RX_BUF_SIZE;
  353. descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
  354. /* flush descriptor and buffer */
  355. flush_dcache_range((unsigned long)descp,
  356. (unsigned long)(descp + sizeof(dma64dd_t)));
  357. flush_dcache_range((unsigned long)bufp,
  358. (unsigned long)(bufp + RX_BUF_SIZE));
  359. buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);
  360. stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
  361. stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);
  362. debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
  363. (uint32_t)bufp, index, buflen, stat0, stat1);
  364. dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);
  365. /* get buffer offset */
  366. control = readl(GMAC0_DMA_RX_CTRL_ADDR);
  367. offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
  368. rcvlen = *(uint16_t *)bufp;
  369. debug("Received %d bytes\n", rcvlen);
  370. /* copy status into temp buf then copy data from rx buffer */
  371. memcpy(statbuf, bufp, offset);
  372. datap = (void *)((uint32_t)bufp + offset);
  373. memcpy(buf, datap, rcvlen);
  374. /* update descriptor that is being added back on ring */
  375. descp->ctrl2 = RX_BUF_SIZE;
  376. descp->addrlow = (uint32_t)bufp;
  377. descp->addrhigh = 0;
  378. /* flush descriptor */
  379. flush_dcache_range((unsigned long)descp,
  380. (unsigned long)(descp + sizeof(dma64dd_t)));
  381. /* set the lastdscr for the rx ring */
  382. writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
  383. return (int)rcvlen;
  384. }
/*
 * Disable one DMA channel (MAC_DMA_TX or MAC_DMA_RX).
 *
 * The TX channel is first suspended (D64_XC_SE) and polled until it is
 * disabled, idle or stopped before being switched off, per the
 * PR8249/PR7577 and PR2414 hardware errata workarounds noted inline.
 *
 * Return: non-zero if the channel reached the DISABLED state.
 */
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}
  422. static int gmac_enable_dma(struct eth_dma *dma, int dir)
  423. {
  424. uint32_t control;
  425. debug("%s enter\n", __func__);
  426. if (dir == MAC_DMA_TX) {
  427. dma->cur_tx_index = 0;
  428. /*
  429. * These bits 20:18 (burstLen) of control register can be
  430. * written but will take effect only if these bits are
  431. * valid. So this will not affect previous versions
  432. * of the DMA. They will continue to have those bits set to 0.
  433. */
  434. control = readl(GMAC0_DMA_TX_CTRL_ADDR);
  435. control |= D64_XC_XE;
  436. if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
  437. control |= D64_XC_PD;
  438. writel(control, GMAC0_DMA_TX_CTRL_ADDR);
  439. /* initailize the DMA channel */
  440. writel((uint32_t)(dma->tx_desc_aligned),
  441. GMAC0_DMA_TX_ADDR_LOW_ADDR);
  442. writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
  443. } else {
  444. dma->cur_rx_index = 0;
  445. control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
  446. D64_RC_AE) | D64_RC_RE;
  447. if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
  448. control |= D64_RC_PD;
  449. if (g_dmactrlflags & DMA_CTRL_ROC)
  450. control |= D64_RC_OC;
  451. /*
  452. * These bits 20:18 (burstLen) of control register can be
  453. * written but will take effect only if these bits are
  454. * valid. So this will not affect previous versions
  455. * of the DMA. They will continue to have those bits set to 0.
  456. */
  457. control &= ~D64_RC_BL_MASK;
  458. /* Keep default Rx burstlen */
  459. control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
  460. control |= HWRXOFF << D64_RC_RO_SHIFT;
  461. writel(control, GMAC0_DMA_RX_CTRL_ADDR);
  462. /*
  463. * the rx descriptor ring should have
  464. * the addresses set properly;
  465. * set the lastdscr for the rx ring
  466. */
  467. writel(((uint32_t)(dma->rx_desc_aligned) +
  468. (RX_BUF_NUM - 1) * RX_BUF_SIZE) &
  469. D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
  470. }
  471. return 0;
  472. }
  473. bool gmac_mii_busywait(unsigned int timeout)
  474. {
  475. uint32_t tmp = 0;
  476. while (timeout > 10) {
  477. tmp = readl(GMAC_MII_CTRL_ADDR);
  478. if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
  479. udelay(10);
  480. timeout -= 10;
  481. } else {
  482. break;
  483. }
  484. }
  485. return tmp & (1 << GMAC_MII_BUSY_SHIFT);
  486. }
  487. int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
  488. {
  489. uint32_t tmp = 0;
  490. u16 value = 0;
  491. /* Busy wait timeout is 1ms */
  492. if (gmac_mii_busywait(1000)) {
  493. error("%s: Prepare MII read: MII/MDIO busy\n", __func__);
  494. return -1;
  495. }
  496. /* Read operation */
  497. tmp = GMAC_MII_DATA_READ_CMD;
  498. tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
  499. (reg << GMAC_MII_PHY_REG_SHIFT);
  500. debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
  501. writel(tmp, GMAC_MII_DATA_ADDR);
  502. if (gmac_mii_busywait(1000)) {
  503. error("%s: MII read failure: MII/MDIO busy\n", __func__);
  504. return -1;
  505. }
  506. value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
  507. debug("MII read data 0x%x\n", value);
  508. return value;
  509. }
  510. int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
  511. u16 value)
  512. {
  513. uint32_t tmp = 0;
  514. /* Busy wait timeout is 1ms */
  515. if (gmac_mii_busywait(1000)) {
  516. error("%s: Prepare MII write: MII/MDIO busy\n", __func__);
  517. return -1;
  518. }
  519. /* Write operation */
  520. tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
  521. tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
  522. (reg << GMAC_MII_PHY_REG_SHIFT));
  523. debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
  524. tmp, phyaddr, reg, value);
  525. writel(tmp, GMAC_MII_DATA_ADDR);
  526. if (gmac_mii_busywait(1000)) {
  527. error("%s: MII write failure: MII/MDIO busy\n", __func__);
  528. return -1;
  529. }
  530. return 0;
  531. }
/* Put the UniMAC into software reset (CC_SR) and let it settle */
void gmac_init_reset(void)
{
	debug("%s enter\n", __func__);

	/* set command config reg CC_SR */
	reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}
/* Release the UniMAC from software reset (CC_SR) and let it settle */
void gmac_clear_reset(void)
{
	debug("%s enter\n", __func__);

	/* clear command config reg CC_SR */
	reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}
/*
 * Common enable/disable path for the UniMAC: put the MAC into software
 * reset, deassert the RX/TX enables while in reset, and - when @en is
 * true - re-assert them after the reset is released.
 */
static void gmac_enable_local(bool en)
{
	uint32_t cmdcfg;

	debug("%s enter\n", __func__);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

	/* put mac in reset */
	gmac_init_reset();

	/* keep the cached value consistent with the in-reset state */
	cmdcfg |= CC_SR;

	/* first deassert rx_ena and tx_ena while in reset */
	cmdcfg &= ~(CC_RE | CC_TE);
	/* write command config reg */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	/* bring mac out of reset */
	gmac_clear_reset();

	/* if not enable exit now */
	if (!en)
		return;

	/* enable the mac transmit and receive paths now */
	udelay(2);
	cmdcfg &= ~CC_SR;
	cmdcfg |= (CC_RE | CC_TE);

	/* assert rx_ena and tx_ena when out of reset to enable the mac */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	return;
}
/* Enable the MAC and clear any stale interrupt status.  Returns 0. */
int gmac_enable(void)
{
	gmac_enable_local(1);

	/* clear interrupts */
	writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
	return 0;
}
/* Disable the MAC (RX/TX paths deasserted).  Returns 0. */
int gmac_disable(void)
{
	gmac_enable_local(0);
	return 0;
}
  584. int gmac_set_speed(int speed, int duplex)
  585. {
  586. uint32_t cmdcfg;
  587. uint32_t hd_ena;
  588. uint32_t speed_cfg;
  589. hd_ena = duplex ? 0 : CC_HD;
  590. if (speed == 1000) {
  591. speed_cfg = 2;
  592. } else if (speed == 100) {
  593. speed_cfg = 1;
  594. } else if (speed == 10) {
  595. speed_cfg = 0;
  596. } else {
  597. error("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
  598. return -1;
  599. }
  600. cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
  601. cmdcfg &= ~(CC_ES_MASK | CC_HD);
  602. cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);
  603. printf("Change GMAC speed to %dMB\n", speed);
  604. debug("GMAC speed cfg 0x%x\n", cmdcfg);
  605. writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
  606. return 0;
  607. }
  608. int gmac_set_mac_addr(unsigned char *mac)
  609. {
  610. /* set our local address */
  611. debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
  612. mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  613. writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
  614. writew(htons(*(uint32_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);
  615. return 0;
  616. }
/*
 * Bring up GMAC0: reset the AMAC core, configure its clocking, program
 * the UniMAC command register, initialize the DMA rings, put the switch
 * into bypass mode and configure the internal MDIO bus.
 *
 * @dev: ethernet device; dev->priv must point at the driver's eth_info
 *
 * Return: 0 on success, -1 on failure (DMA resources are released).
 */
int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);
	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * As AMAC is just reset, NO need?
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	/*
	 * Clear mode/flow-control bits, then enable promiscuous mode and
	 * payload-length checking.
	 * NOTE(review): CC_CFE is cleared in the mask below and set again
	 * in the OR - the clear is redundant as written; confirm intent.
	 */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		error("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);
	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */
	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus*/
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * give bit[6:0](MDCDIV) with required divisor to set
	 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
	 */
	tmp |= 0x1A;
	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		error("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}
  718. int gmac_add(struct eth_device *dev)
  719. {
  720. struct eth_info *eth = (struct eth_info *)(dev->priv);
  721. struct eth_dma *dma = &(eth->dma);
  722. void *tmp;
  723. /*
  724. * Desc has to be 16-byte aligned ?
  725. * If it is 8-byte aligned by malloc, fail Tx
  726. */
  727. tmp = malloc(sizeof(dma64dd_t) * TX_BUF_NUM + 8);
  728. if (tmp == NULL) {
  729. printf("%s: Failed to allocate TX desc Buffer\n", __func__);
  730. return -1;
  731. }
  732. dma->tx_desc = (void *)tmp;
  733. dma->tx_desc_aligned = (void *)(((uint32_t)tmp) & (~0xf));
  734. debug("TX Descriptor Buffer: %p; length: 0x%x\n",
  735. dma->tx_desc_aligned, sizeof(dma64dd_t) * TX_BUF_NUM);
  736. tmp = malloc(TX_BUF_SIZE * TX_BUF_NUM);
  737. if (tmp == NULL) {
  738. printf("%s: Failed to allocate TX Data Buffer\n", __func__);
  739. free(dma->tx_desc);
  740. return -1;
  741. }
  742. dma->tx_buf = (uint8_t *)tmp;
  743. debug("TX Data Buffer: %p; length: 0x%x\n",
  744. dma->tx_buf, TX_BUF_SIZE * TX_BUF_NUM);
  745. /* Desc has to be 16-byte aligned ? */
  746. tmp = malloc(sizeof(dma64dd_t) * RX_BUF_NUM + 8);
  747. if (tmp == NULL) {
  748. printf("%s: Failed to allocate RX Descriptor\n", __func__);
  749. free(dma->tx_desc);
  750. free(dma->tx_buf);
  751. return -1;
  752. }
  753. dma->rx_desc = tmp;
  754. dma->rx_desc_aligned = (void *)(((uint32_t)tmp) & (~0xf));
  755. debug("RX Descriptor Buffer: %p, length: 0x%x\n",
  756. dma->rx_desc_aligned, sizeof(dma64dd_t) * RX_BUF_NUM);
  757. tmp = malloc(RX_BUF_SIZE * RX_BUF_NUM);
  758. if (tmp == NULL) {
  759. printf("%s: Failed to allocate RX Data Buffer\n", __func__);
  760. free(dma->tx_desc);
  761. free(dma->tx_buf);
  762. free(dma->rx_desc);
  763. return -1;
  764. }
  765. dma->rx_buf = tmp;
  766. debug("RX Data Buffer: %p; length: 0x%x\n",
  767. dma->rx_buf, RX_BUF_SIZE * RX_BUF_NUM);
  768. g_dmactrlflags = 0;
  769. eth->phy_interface = PHY_INTERFACE_MODE_GMII;
  770. dma->tx_packet = gmac_tx_packet;
  771. dma->check_tx_done = gmac_check_tx_done;
  772. dma->check_rx_done = gmac_check_rx_done;
  773. dma->enable_dma = gmac_enable_dma;
  774. dma->disable_dma = gmac_disable_dma;
  775. eth->miiphy_read = gmac_miiphy_read;
  776. eth->miiphy_write = gmac_miiphy_write;
  777. eth->mac_init = gmac_mac_init;
  778. eth->disable_mac = gmac_disable;
  779. eth->enable_mac = gmac_enable;
  780. eth->set_mac_addr = gmac_set_mac_addr;
  781. eth->set_mac_speed = gmac_set_speed;
  782. return 0;
  783. }