/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <linux/errno.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>
#include <dm.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

#define BITMASK(bits) (BIT(bits) - 1)
#define PHY_REG_MASK 0x1f
#define PHY_ID_MASK 0x1f
#define NUM_DESCS (PKTBUFSRX * 2)
#define PKT_MIN 60
#define PKT_MAX (1500 + 14 + 4 + 4)
#define CLEAR_BIT 1
#define GIGABITEN BIT(7)
#define FULLDUPLEXEN BIT(0)
#define MIIEN BIT(15)

/* reg offset */
#define CPSW_HOST_PORT_OFFSET 0x108
#define CPSW_SLAVE0_OFFSET 0x208
#define CPSW_SLAVE1_OFFSET 0x308
#define CPSW_SLAVE_SIZE 0x100
#define CPSW_CPDMA_OFFSET 0x800
#define CPSW_HW_STATS 0x900
#define CPSW_STATERAM_OFFSET 0xa00
#define CPSW_CPTS_OFFSET 0xc00
#define CPSW_ALE_OFFSET 0xd00
#define CPSW_SLIVER0_OFFSET 0xd80
#define CPSW_SLIVER1_OFFSET 0xdc0
#define CPSW_BD_OFFSET 0x2000
#define CPSW_MDIO_DIV 0xff

#define AM335X_GMII_SEL_OFFSET 0x630

/* DMA Registers */
#define CPDMA_TXCONTROL 0x004
#define CPDMA_RXCONTROL 0x014
#define CPDMA_SOFTRESET 0x01c
#define CPDMA_RXFREE 0x0e0
#define CPDMA_TXHDP_VER1 0x100
#define CPDMA_TXHDP_VER2 0x200
#define CPDMA_RXHDP_VER1 0x120
#define CPDMA_RXHDP_VER2 0x220
#define CPDMA_TXCP_VER1 0x140
#define CPDMA_TXCP_VER2 0x240
#define CPDMA_RXCP_VER1 0x160
#define CPDMA_RXCP_VER2 0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP BIT(31)
#define CPDMA_DESC_EOP BIT(30)
#define CPDMA_DESC_OWNER BIT(29)
#define CPDMA_DESC_EOQ BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT 100 /* msecs */
#define CPDMA_TIMEOUT 100 /* msecs */

struct cpsw_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
	u32 alive;
	u32 link;
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];
	struct {
		u32 access;
		u32 physel;
#define USERACCESS_GO BIT(31)
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29)
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff)
	} user[0];
};

struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
};

struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
#ifdef CONFIG_AM33XX
	u32 gap_thresh;
#elif defined(CONFIG_TI814X)
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
#endif
	u32 sa_lo;
	u32 sa_hi;
};

struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;
	u32 cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};

#define ALE_ENTRY_BITS 68
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL 0x08
#define ALE_UNKNOWNVLAN 0x18
#define ALE_TABLE_CONTROL 0x20
#define ALE_TABLE 0x34
#define ALE_PORTCTL 0x40

#define ALE_TABLE_WRITE BIT(31)

#define ALE_TYPE_FREE 0
#define ALE_TYPE_ADDR 1
#define ALE_TYPE_VLAN 2
#define ALE_TYPE_VLAN_ADDR 3

#define ALE_UCAST_PERSISTANT 0
#define ALE_UCAST_UNTOUCHED 1
#define ALE_UCAST_OUI 2
#define ALE_UCAST_TOUCHED 3

#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
#define ALE_MCAST_FWD_LEARN 2
#define ALE_MCAST_FWD_2 3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE = 0x00,
	ALE_PORT_STATE_BLOCK = 0x01,
	ALE_PORT_STATE_LEARN = 0x02,
	ALE_PORT_STATE_FORWARD = 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE 1
#define ALE_BLOCKED 2

struct cpsw_slave {
	struct cpsw_slave_regs *regs;
	struct cpsw_sliver_regs *sliver;
	int slave_num;
	u32 mac_control;
	struct cpsw_slave_data *data;
};

struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	u32 sw_buffer;
	u32 sw_len;
};

struct cpdma_chan {
	struct cpdma_desc *head, *tail;
	void *hdp, *cp, *rxfree;
};

/* AM33xx SoC specific definitions for the CONTROL port */
#define AM33XX_GMII_SEL_MODE_MII 0
#define AM33XX_GMII_SEL_MODE_RMII 1
#define AM33XX_GMII_SEL_MODE_RGMII 2

#define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4)
#define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5)
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)

#define GMII_SEL_MODE_MASK 0x3

#define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld) __raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
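
/*
 * Slave iteration helpers: for_active_slave expands to an assignment
 * followed by an if, so it applies exactly the next statement (or block)
 * to the slave selected by data.active_slave; for_each_slave is a plain
 * loop over all configured slaves.
 */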
#define for_active_slave(slave, priv) \
	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
		(priv)->data.slaves; slave++)

struct cpsw_priv {
#ifdef CONFIG_DM_ETH
	struct udevice *dev;
#else
	struct eth_device *dev;
#endif
	struct cpsw_platform_data data;
	int host_port;

	struct cpsw_regs *regs;
	void *dma_regs;
	struct cpsw_host_regs *host_port_regs;
	void *ale_regs;

	struct cpdma_desc *descs;
	struct cpdma_desc *desc_free;
	struct cpdma_chan rx_chan, tx_chan;

	struct cpsw_slave *slaves;
	struct phy_device *phydev;
	struct mii_dev *bus;

	u32 phy_mask;
};

static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}
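
/*
 * Bit fields within the 68-bit ALE table entry, given as (start bit, width).
 * Entries are handled as three 32-bit words; the get/set helpers above keep
 * the most significant word at index 0 (hence the "2 - idx" flip).
 */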
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;

		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);

	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);

	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}

static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	int data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}
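
/*
 * The switch holds the station address in two slave registers: sa_hi takes
 * MAC bytes 0-3 (byte 0 in the least significant byte) and sa_lo takes
 * bytes 4-5, which is exactly what mac_hi()/mac_lo() below assemble.
 */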
#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
		     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
#ifdef CONFIG_DM_ETH
	struct eth_pdata *pdata = dev_get_platdata(priv->dev);

	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
}

static int cpsw_slave_update_link(struct cpsw_slave *slave,
				  struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;
	int ret = -ENODEV;

	phy = priv->phydev;
	if (!phy)
		goto out;

	ret = phy_startup(phy);
	if (ret)
		goto out;

	if (link)
		*link = phy->link;

	if (phy->link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;

out:
	return ret;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int ret = -ENODEV;
	struct cpsw_slave *slave;

	for_active_slave(slave, priv)
		ret = cpsw_slave_update_link(slave, priv, NULL);

	return ret;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}
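
/*
 * cpdma_submit() queues one buffer on a channel: the descriptor is marked
 * SOP|EOP|OWNER and either becomes the new head (written to the channel
 * head descriptor pointer) or is linked after the current tail. If the
 * hardware already hit end-of-queue (EOQ set on the old tail), the head
 * pointer is re-written to restart the channel; for RX channels the free
 * buffer count register is bumped as well.
 */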
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}

static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
{
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

	for_active_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	ret = cpsw_update_link(priv);
	if (ret)
		goto out;

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 +
				     4 * i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 +
				     4 * i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

out:
	return ret;
}

static void _cpsw_halt(struct cpsw_priv *priv)
{
	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
}

static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
{
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
{
	void *buffer;
	int len;
	int ret = -EAGAIN;

	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
	if (ret < 0)
		return ret;

	invalidate_dcache_range((unsigned long)buffer,
				(unsigned long)buffer + PKTSIZE_ALIGN);
	*pkt = buffer;
	return len;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void *regs = priv->regs;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
{
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			     slave->data->phy_addr,
			     priv->dev,
			     slave->data->phy_if);
	if (!phydev)
		return -1;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

#ifdef CONFIG_DM_ETH
	if (slave->data->phy_of_handle)
		phydev->dev->of_offset = slave->data->phy_of_handle;
#endif

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

int _cpsw_register(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_platform_data *data = &priv->data;
	void *regs = (void *)data->cpsw_base;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves)
		return -ENOMEM;

	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;
	priv->descs = (void *)regs + data->bd_ram_ofs;

	int idx = 0;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(priv->dev->name);
	for_active_slave(slave, priv)
		cpsw_phy_init(priv, slave);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_init(priv, dev->enetaddr);
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_halt(priv);
}

static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_send(priv, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	uchar *pkt = NULL;
	int len;

	len = _cpsw_recv(priv, &pkt);
	if (len > 0) {
		net_process_received_packet(pkt, len);
		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
	}

	return len;
}

int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct eth_device *dev;
	int ret;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->dev = dev;
	priv->data = *data;

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	ret = _cpsw_register(priv);
	if (ret < 0) {
		eth_unregister(dev);
		free(dev);
		free(priv);
		return ret;
	}

	return 1;
}
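
/*
 * Usage sketch for the legacy (non-DM) cpsw_register() above: a board file
 * typically fills a cpsw_platform_data with its SoC/board specific values
 * and calls cpsw_register() from board_eth_init(). The addresses, counts
 * and PHY settings below are placeholders, not values taken from this
 * driver.
 *
 *	static struct cpsw_slave_data board_slave_data[] = {
 *		{
 *			.slave_reg_ofs = CPSW_SLAVE0_OFFSET,
 *			.sliver_reg_ofs = CPSW_SLIVER0_OFFSET,
 *			.phy_addr = 0,			// board specific
 *			.phy_if = PHY_INTERFACE_MODE_RGMII,
 *		},
 *	};
 *
 *	static struct cpsw_platform_data board_cpsw_data = {
 *		.mdio_base = 0x4a101000,		// SoC specific
 *		.cpsw_base = 0x4a100000,		// SoC specific
 *		.mdio_div = CPSW_MDIO_DIV,
 *		.channels = 8,
 *		.cpdma_reg_ofs = CPSW_CPDMA_OFFSET,
 *		.slaves = 1,
 *		.slave_data = board_slave_data,
 *		.active_slave = 0,
 *		.ale_reg_ofs = CPSW_ALE_OFFSET,
 *		.ale_entries = 1024,
 *		.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET,
 *		.bd_ram_ofs = CPSW_BD_OFFSET,
 *		.mac_control = MIIEN,			// board specific
 *		.host_port_num = 0,
 *		.version = CPSW_CTRL_VERSION_2,
 *	};
 *
 *	// in board_eth_init(): return cpsw_register(&board_cpsw_data);
 */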
#else
static int cpsw_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_init(priv, pdata->enetaddr);
}

static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_send(priv, packet, length);
}

static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_recv(priv, packetp);
}

static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
			     int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
}

static void cpsw_eth_stop(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_halt(priv);
}

static int cpsw_eth_probe(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	priv->dev = dev;

	return _cpsw_register(priv);
}

static const struct eth_ops cpsw_eth_ops = {
	.start = cpsw_eth_start,
	.send = cpsw_eth_send,
	.recv = cpsw_eth_recv,
	.free_pkt = cpsw_eth_free_pkt,
	.stop = cpsw_eth_stop,
};

static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
{
	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
						  false);
}

static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;
	bool rgmii_id = false;
	int slave = priv->data.active_slave;

	reg = readl(priv->data.gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	case PHY_INTERFACE_MODE_MII:
	default:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	}

	mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
	mode <<= slave * 2;

	if (priv->data.rmii_clock_external) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
		else
			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
	}

	if (rgmii_id) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
		else
			mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
	}

	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->data.gmii_sel);
}

static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;
	int slave = priv->data.active_slave;

	reg = readl(priv->data.gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_MII:
	default:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	}

	switch (slave) {
	case 0:
		mask = GMII_SEL_MODE_MASK;
		break;
	case 1:
		mask = GMII_SEL_MODE_MASK << 4;
		mode <<= 4;
		break;
	default:
		dev_err(priv->dev, "invalid slave number...\n");
		return;
	}

	if (priv->data.rmii_clock_external)
		dev_err(priv->dev, "RMII External clock is not supported\n");

	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->data.gmii_sel);
}

static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat,
			 phy_interface_t phy_mode)
{
	if (!strcmp(compat, "ti,am3352-cpsw-phy-sel"))
		cpsw_gmii_sel_am3352(priv, phy_mode);
	else if (!strcmp(compat, "ti,am43xx-cpsw-phy-sel"))
		cpsw_gmii_sel_am3352(priv, phy_mode);
	else if (!strcmp(compat, "ti,dra7xx-cpsw-phy-sel"))
		cpsw_gmii_sel_dra7xx(priv, phy_mode);
}

static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);
	struct gpio_desc *mode_gpios;
	const char *phy_mode;
	const char *phy_sel_compat = NULL;
	const void *fdt = gd->fdt_blob;
	int node = dev->of_offset;
	int subnode;
	int slave_index = 0;
	int active_slave;
	int num_mode_gpios;
	int ret;

	pdata->iobase = dev_get_addr(dev);
	priv->data.version = CPSW_CTRL_VERSION_2;
	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
	priv->data.mdio_div = CPSW_MDIO_DIV;
	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;
	pdata->phy_interface = -1;

	priv->data.cpsw_base = pdata->iobase;

	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
	if (priv->data.channels <= 0) {
		printf("error: cpdma_channels not found in dt\n");
		return -ENOENT;
	}

	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
	if (priv->data.slaves <= 0) {
		printf("error: slaves not found in dt\n");
		return -ENOENT;
	}
	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
				       priv->data.slaves);

	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
	if (priv->data.ale_entries <= 0) {
		printf("error: ale_entries not found in dt\n");
		return -ENOENT;
	}

	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
	if (priv->data.bd_ram_ofs <= 0) {
		printf("error: bd_ram_size not found in dt\n");
		return -ENOENT;
	}

	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
	if (priv->data.mac_control <= 0) {
		printf("error: mac_control not found in dt\n");
		return -ENOENT;
	}

	num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
	if (num_mode_gpios > 0) {
		mode_gpios = malloc(sizeof(struct gpio_desc) *
				    num_mode_gpios);
		gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
					  num_mode_gpios, GPIOD_IS_OUT);
		free(mode_gpios);
	}

	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
	priv->data.active_slave = active_slave;

	fdt_for_each_subnode(subnode, fdt, node) {
		int len;
		const char *name;

		name = fdt_get_name(fdt, subnode, &len);

		if (!strncmp(name, "mdio", 4)) {
			u32 mdio_base;

			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
			if (mdio_base == FDT_ADDR_T_NONE) {
				error("Not able to get MDIO address space\n");
				return -ENOENT;
			}
			priv->data.mdio_base = mdio_base;
		}

		if (!strncmp(name, "slave", 5)) {
			u32 phy_id[2];

			if (slave_index >= priv->data.slaves)
				continue;

			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
			if (phy_mode)
				priv->data.slave_data[slave_index].phy_if =
					phy_get_interface_by_name(phy_mode);

			priv->data.slave_data[slave_index].phy_of_handle =
				fdtdec_lookup_phandle(fdt, subnode,
						      "phy-handle");

			if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
				priv->data.slave_data[slave_index].phy_addr =
					fdtdec_get_int(gd->fdt_blob,
						       priv->data.slave_data[slave_index].phy_of_handle,
						       "reg", -1);
			} else {
				fdtdec_get_int_array(fdt, subnode, "phy_id",
						     phy_id, 2);
				priv->data.slave_data[slave_index].phy_addr =
					phy_id[1];
			}
			slave_index++;
		}

		if (!strncmp(name, "cpsw-phy-sel", 12)) {
			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
								    subnode);
			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
				error("Not able to get gmii_sel reg address\n");
				return -ENOENT;
			}

			if (fdt_get_property(fdt, subnode, "rmii-clock-ext",
					     NULL))
				priv->data.rmii_clock_external = true;

			phy_sel_compat = fdt_getprop(fdt, subnode, "compatible",
						     NULL);
			if (!phy_sel_compat) {
				error("Not able to get gmii_sel compatible\n");
				return -ENOENT;
			}
		}
	}

	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;

	if (priv->data.slaves == 2) {
		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
	}

	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
	if (ret < 0) {
		error("cpsw read efuse mac failed\n");
		return ret;
	}

	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	/* Select phy interface in control module */
	cpsw_phy_sel(priv, phy_sel_compat, pdata->phy_interface);

	return 0;
}

static const struct udevice_id cpsw_eth_ids[] = {
	{ .compatible = "ti,cpsw" },
	{ .compatible = "ti,am335x-cpsw" },
	{ }
};

U_BOOT_DRIVER(eth_cpsw) = {
	.name = "eth_cpsw",
	.id = UCLASS_ETH,
	.of_match = cpsw_eth_ids,
	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
	.probe = cpsw_eth_probe,
	.ops = &cpsw_eth_ops,
	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
#endif /* CONFIG_DM_ETH */