/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>
static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}
#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}
/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			    phydev->pause ? "rx/tx" : "off");
	} else	{
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);
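
/*
 * Illustrative usage (not part of the original file): MAC drivers commonly
 * call phy_print_status() from their adjust_link callback whenever the link
 * state they cached differs from phydev->link.  The names "foo_priv",
 * "foo_adjust_link" and "foo_mac_config" below are hypothetical.
 *
 *	static void foo_adjust_link(struct net_device *ndev)
 *	{
 *		struct foo_priv *priv = netdev_priv(ndev);
 *		struct phy_device *phydev = ndev->phydev;
 *
 *		if (phydev->link != priv->old_link ||
 *		    phydev->speed != priv->old_speed ||
 *		    phydev->duplex != priv->old_duplex) {
 *			foo_mac_config(priv, phydev->speed, phydev->duplex);
 *			priv->old_link = phydev->link;
 *			priv->old_speed = phydev->speed;
 *			priv->old_duplex = phydev->duplex;
 *			phy_print_status(phydev);
 *		}
 *	}
 */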

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev.
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	return genphy_aneg_done(phydev);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 * matches the desired speed and duplex, and returns the index
 * of that setting.  Returns the index of the last setting if
 * none of the others match.
 */
static inline unsigned int phy_find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 * than or equal to the one pointed to by idx, as determined by
 * the mask in features.  Returns the index of the last setting
 * if nothing else matches.
 */
static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned int idx;

	idx = phy_find_valid(phy_find_setting(speed, duplex), features);

	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
		(settings[idx].setting & features);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 * duplexes.  Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	unsigned int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			     features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 *
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->mdio.addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = autoneg;

	phydev->speed = speed;

	phydev->advertising = advertising;

	if (autoneg == AUTONEG_ENABLE)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = duplex;

	phydev->mdix = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->port = PORT_BNC;
	else
		cmd->port = PORT_MII;
	cmd->phy_address = phydev->mdio.addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;
	cmd->eth_tp_mdix_ctrl = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

int phy_ethtool_ksettings_get(struct phy_device *phydev,
			      struct ethtool_link_ksettings *cmd)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						phydev->supported);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						phydev->advertising);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						phydev->lp_advertising);

	cmd->base.speed = phydev->speed;
	cmd->base.duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->base.port = PORT_BNC;
	else
		cmd->base.port = PORT_MII;

	cmd->base.phy_address = phydev->mdio.addr;
	cmd->base.autoneg = phydev->autoneg;
	cmd->base.eth_tp_mdix_ctrl = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
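
/*
 * Illustrative usage (not part of the original file): a MAC driver that
 * cannot use the generic phy_ethtool_{get,set}_link_ksettings() wrappers at
 * the end of this file (for example because it must take a private lock
 * first) can forward to phy_ethtool_ksettings_get/_set() directly.  The name
 * "foo_get_link_ksettings" is hypothetical.
 *
 *	static int foo_get_link_ksettings(struct net_device *ndev,
 *					  struct ethtool_link_ksettings *cmd)
 *	{
 *		if (!ndev->phydev)
 *			return -ENODEV;
 *
 *		return phy_ethtool_ksettings_get(ndev->phydev, cmd);
 *	}
 */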

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->mdio.addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->mdio.bus,
						 mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->mdio.addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->mdio.addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);
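
/*
 * Illustrative usage (not part of the original file): a MAC driver without
 * special ioctl needs typically forwards its ndo_do_ioctl handler straight
 * to phy_mii_ioctl().  The name "foo_ioctl" is hypothetical.
 *
 *	static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 *	{
 *		if (!netif_running(ndev) || !ndev->phydev)
 *			return -EINVAL;
 *
 *		return phy_mii_ioctl(ndev->phydev, ifr, cmd);
 *	}
 */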

/**
 * phy_start_aneg_priv - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
{
	bool trigger = false;
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

	/* Re-schedule a PHY state machine to check PHY status because
	 * negotiation may already be done and aneg interrupt may not be
	 * generated.
	 */
	if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
		err = phy_aneg_done(phydev);
		if (err > 0) {
			trigger = true;
			err = 0;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);

	if (trigger)
		phy_trigger_machine(phydev, sync);

	return err;
}

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	return phy_start_aneg_priv(phydev, true);
}
EXPORT_SYMBOL(phy_start_aneg);
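
/*
 * Illustrative usage (not part of the original file): after changing the
 * advertised modes, a caller restarts auto-negotiation so that the new
 * advertisement takes effect, e.g. to drop gigabit modes:
 *
 *	phydev->advertising &= ~(ADVERTISED_1000baseT_Full |
 *				 ADVERTISED_1000baseT_Half);
 *	phy_start_aneg(phydev);
 */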

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 * which tracks whether the PHY is starting up, negotiating,
 * etc.  This function starts the timer which tracks the state
 * of the PHY.  If you want to maintain your own state machine,
 * do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: There has been a change in state which requires that the
 * state machine runs.
 */
void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&phydev->state_queue);
	else
		cancel_delayed_work(&phydev->state_queue);

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 * (unless it wasn't up yet). This function must be called BEFORE
 * phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);

	phy_trigger_machine(phydev, false);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line.
	 */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	queue_work(system_power_efficient_wq, &phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 * If this fails, then we set irq to PHY_POLL.
 * Otherwise, we enable the interrupts in the PHY.
 * This should only be called with a valid IRQ number.
 * Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
			IRQF_SHARED,
			"phy_interrupt",
			phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->mdio.bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);
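
/*
 * Illustrative usage (not part of the original file): a driver that manages
 * the PHY connection manually (rather than relying on a helper that starts
 * interrupts for it) might request the PHY IRQ itself once a valid IRQ
 * number is known:
 *
 *	if (phydev->irq > 0)
 *		phy_start_interrupts(phydev);
 *
 * Note that on request_irq() failure the function falls back to PHY_POLL and
 * still returns 0, so the caller does not need a separate polling path.
 */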

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);

	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phy_interrupt_is_valid(phydev)) {
		if (phydev->drv->did_interrupt &&
		    !phydev->drv->did_interrupt(phydev))
			goto ignore;

		if (phy_disable_interrupts(phydev))
			goto phy_err;
	}

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	if (phy_interrupt_is_valid(phydev)) {
		atomic_dec(&phydev->irq_disable);
		enable_irq(phydev->irq);

		/* Reenable interrupts */
		if (PHY_HALTED != phydev->state &&
		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
			goto irq_enable_err;
	}

	/* reschedule state queue work to run as soon as possible */
	phy_trigger_machine(phydev, true);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 * handle PHY-related work.  Used during startup to start the
 * PHY, and after a call to phy_stop() to resume operation.
 * Also used to indicate the MDIO bus has cleared an error
 * condition.
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		if (phydev->irq != PHY_POLL) {
			err = phy_enable_interrupts(phydev);
			if (err < 0)
				break;
		}

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);

	phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
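
/*
 * Illustrative usage (not part of the original file): phy_start() and
 * phy_stop() usually bracket the netdev open/stop path once the PHY has been
 * connected (here assumed to have happened at probe time).  The names
 * "foo_open" and "foo_stop" are hypothetical.
 *
 *	static int foo_open(struct net_device *ndev)
 *	{
 *		phy_start(ndev->phydev);
 *		netif_start_queue(ndev);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *ndev)
 *	{
 *		netif_stop_queue(ndev);
 *		phy_stop(ndev->phydev);
 *		return 0;
 *	}
 */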

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (phy_interrupt_is_valid(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling and link changed
		 * since latest checking.
		 */
		if (phydev->irq == PHY_POLL) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		/*
		 * Failsafe: check that nobody set phydev->link=0 between two
		 * poll cycles, otherwise we won't leave RUNNING state as long
		 * as link remains down.
		 */
		if (!phydev->link && phydev->state == PHY_RUNNING) {
			phydev->state = PHY_CHANGELINK;
			phydev_err(phydev, "no link in PHY_RUNNING\n");
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else {
					phydev->state = PHY_NOLINK;
				}
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
			}
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg_priv(phydev, false);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	phydev_dbg(phydev, "PHY state change %s -> %s\n",
		   phy_state_to_str(old_state),
		   phy_state_to_str(phydev->state));

	/* Only re-schedule a PHY state machine change if we are polling the
	 * PHY.  If PHY_IGNORE_INTERRUPT is set, then we will be moving
	 * between states from phy_mac_interrupt().
	 */
	if (phydev->irq == PHY_POLL)
		queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
				   PHY_STATE_TIME * HZ);
}

void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	phydev->link = new_link;

	/* Trigger a state machine change */
	queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);
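
/*
 * Illustrative usage (not part of the original file): a MAC that learns the
 * link state through an in-band mechanism (and therefore uses
 * PHY_IGNORE_INTERRUPT instead of polling) can report link changes from its
 * own interrupt handler.  "FOO_IRQ_LINK" and "FOO_LINK_UP" are hypothetical
 * register bits.
 *
 *	if (irq_status & FOO_IRQ_LINK)
 *		phy_mac_interrupt(ndev->phydev,
 *				  !!(irq_status & FOO_LINK_UP));
 */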

static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @phydev: The PHY device bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 *
 * Description: it reads data from the MMD registers (clause 22 to access to
 * clause 45) of the specified phy address.
 * To read these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read  reg 14 // Read MMD data
 */
int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad)
{
	struct phy_driver *phydrv = phydev->drv;
	int addr = phydev->mdio.addr;
	int value = -1;

	if (!phydrv->read_mmd_indirect) {
		struct mii_bus *bus = phydev->mdio.bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Read the content of the MMD's selected register */
		value = bus->read(bus, addr, MII_MMD_DATA);
		mutex_unlock(&bus->mdio_lock);
	} else {
		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
	}
	return value;
}
EXPORT_SYMBOL(phy_read_mmd_indirect);

/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @data: data to write in the MMD register
 *
 * Description: Write data to the MMD registers of the specified
 * phy address.
 * To write these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
			    int devad, u32 data)
{
	struct phy_driver *phydrv = phydev->drv;
	int addr = phydev->mdio.addr;

	if (!phydrv->write_mmd_indirect) {
		struct mii_bus *bus = phydev->mdio.bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Write the data into MMD's selected register */
		bus->write(bus, addr, MII_MMD_DATA, data);
		mutex_unlock(&bus->mdio_lock);
	} else {
		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
	}
}
EXPORT_SYMBOL(phy_write_mmd_indirect);
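
/*
 * Illustrative usage (not part of the original file): a read-modify-write of
 * an MMD register through the clause 22 indirect registers, here setting the
 * 1000BASE-T EEE advertisement bit as an example:
 *
 *	int adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
 *
 *	if (adv >= 0)
 *		phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
 *				       adv | MDIO_EEE_1000T);
 */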

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	/* According to 802.3az, EEE is supported only in full-duplex mode.
	 * Also the EEE feature is active when the core is operating with MII,
	 * GMII or RGMII (all kinds). Internal PHYs are also allowed to proceed
	 * and should return an error if they do not support EEE.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	     (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	     phy_interface_is_rgmii(phydev) ||
	     phy_is_internal(phydev))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings were negotiated and verify them
		 * in the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
							MDIO_MMD_PCS);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
					       MDIO_MMD_PCS, val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
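
/*
 * Illustrative usage (not part of the original file): a MAC driver that
 * supports Low Power Idle might probe for EEE from its adjust_link callback
 * once the link is up, and only enable its LPI logic when the check passes.
 * "priv->eee_active" is a hypothetical flag.
 *
 *	if (phydev->link && !phy_init_eee(phydev, true))
 *		priv->eee_active = true;
 *	else
 *		priv->eee_active = false;
 */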

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it programs the EEE advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);

	/* Mask prohibited EEE modes */
	val &= ~phydev->eee_broken_modes;

	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
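
/*
 * Illustrative usage (not part of the original file): the two helpers above
 * map naturally onto the ethtool get_eee/set_eee operations of a MAC driver.
 * The name "foo_get_eee" is hypothetical.
 *
 *	static int foo_get_eee(struct net_device *ndev, struct ethtool_eee *e)
 *	{
 *		if (!ndev->phydev)
 *			return -ENODEV;
 *
 *		return phy_ethtool_get_eee(ndev->phydev, e);
 *	}
 */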

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
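
/*
 * Illustrative usage (not part of the original file): with the two wrappers
 * above, a MAC driver can point its ethtool link-settings operations straight
 * at phylib.  The name "foo_ethtool_ops" is hypothetical.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link		= ethtool_op_get_link,
 *		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 *		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 *	};
 */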