phy-keystone-serdes.c 63 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519
  1. /*
  2. * Texas Instruments Keystone SerDes driver
  3. * Authors: WingMan Kwok <w-kwok2@ti.com>
  4. *
  5. * This is the SerDes Phy driver for Keystone devices. This is
  6. * required to support PCIe RC functionality based on designware
  7. * PCIe hardware, gbe and 10gbe found on these devices.
  8. *
  9. * Revision History:
  10. * 3.3.0.2c
  11. * - Full update based on CSL version 3.3.0.2c
  12. * - This update requires the remodelling of each SerDes lane
  13. * as a separate PHY device, as opposed to each SerDes as a
  14. * separate PHY device prior to this patch.
  15. *
  16. * 1.0.0
  17. * - Initial revision.
  18. *
  19. * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
  20. *
  21. * Redistribution and use in source and binary forms, with or without
  22. * modification, are permitted provided that the following conditions
  23. * are met:
  24. *
  25. * Redistributions of source code must retain the above copyright
  26. * notice, this list of conditions and the following disclaimer.
  27. *
  28. * Redistributions in binary form must reproduce the above copyright
  29. * notice, this list of conditions and the following disclaimer in the
  30. * documentation and/or other materials provided with the
  31. * distribution.
  32. *
  33. * Neither the name of Texas Instruments Incorporated nor the names of
  34. * its contributors may be used to endorse or promote products derived
  35. * from this software without specific prior written permission.
  36. *
  37. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  38. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  39. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  40. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  41. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  42. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  43. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  44. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  45. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  46. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  47. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  48. */
  49. #include <linux/module.h>
  50. #include <linux/io.h>
  51. #include <linux/mfd/syscon.h>
  52. #include <linux/delay.h>
  53. #include <linux/firmware.h>
  54. #include <linux/of.h>
  55. #include <linux/of_address.h>
  56. #include <linux/of_platform.h>
  57. #include <linux/regmap.h>
  58. #include <linux/sizes.h>
  59. #include <linux/phy/phy.h>
  60. #include <linux/platform_device.h>
  61. #define KSERDES_SS_OFFSET 0x1fc0
  62. #define MOD_VER_REG (KSERDES_SS_OFFSET + 0x00)
  63. #define MEM_ADR_REG (KSERDES_SS_OFFSET + 0x04)
  64. #define MEM_DAT_REG (KSERDES_SS_OFFSET + 0x08)
  65. #define MEM_DATINC_REG (KSERDES_SS_OFFSET + 0x0c)
  66. #define CPU_CTRL_REG (KSERDES_SS_OFFSET + 0x10)
  67. #define LANE_CTRL_STS_REG(x) (KSERDES_SS_OFFSET + 0x20 + (x * 0x04))
  68. #define LINK_LOSS_WAIT_REG (KSERDES_SS_OFFSET + 0x30)
  69. #define PLL_CTRL_REG (KSERDES_SS_OFFSET + 0x34)
  70. #define CMU0_SS_OFFSET 0x0000
  71. #define CMU0_REG(x) (CMU0_SS_OFFSET + x)
  72. #define LANE0_SS_OFFSET 0x0200
  73. #define LANEX_SS_OFFSET(x) (LANE0_SS_OFFSET * (x + 1))
  74. #define LANEX_REG(x, y) (LANEX_SS_OFFSET(x) + y)
  75. #define CML_SS_OFFSET 0x0a00
  76. #define CML_REG(x) (CML_SS_OFFSET + x)
  77. #define CMU1_SS_OFFSET 0x0c00
  78. #define CMU1_REG(x) (CMU1_SS_OFFSET + x)
  79. #define PCSR_OFFSET(x) (x * 0x80)
  80. #define PCSR_TX_CTL(x) (PCSR_OFFSET(x) + 0x00)
  81. #define PCSR_TX_STATUS(x) (PCSR_OFFSET(x) + 0x04)
  82. #define PCSR_RX_CTL(x) (PCSR_OFFSET(x) + 0x08)
  83. #define PCSR_RX_STATUS(x) (PCSR_OFFSET(x) + 0x0C)
  84. #define XGE_CTRL_OFFSET 0x0c
  85. #define PCIE_PL_GEN2_OFFSET 0x180c
  86. #define reg_rmw(addr, value, mask) \
  87. writel(((readl(addr) & (~(mask))) | (value & (mask))), (addr))
  88. #define FINSR(base, offset, msb, lsb, val) \
  89. reg_rmw((base) + (offset), ((val) << (lsb)), GENMASK((msb), (lsb)))
  90. #define FEXTR(val, msb, lsb) \
  91. (((val) >> (lsb)) & ((1 << ((msb) - (lsb) + 1)) - 1))
  92. #define MOD_VER(serdes) \
  93. ((kserdes_readl(serdes, MOD_VER_REG) >> 16) & 0xffff)
  94. #define PHY_A(serdes) (MOD_VER(serdes) != 0x4eba)
  95. #define FOUR_LANE(serdes) \
  96. ((MOD_VER(serdes) == 0x4eb9) || (MOD_VER(serdes) == 0x4ebd))
  97. #define KSERDES_MAX_LANES 4
  98. #define MAX_COEFS 5
  99. #define MAX_CMP 5
  100. #define OFFSET_SAMPLES 100
  101. #define for_each_cmp(i) \
  102. for (i = 1; i < MAX_CMP; i++)
  103. #define CPU_EN BIT(31)
  104. #define CPU_GO BIT(30)
  105. #define POR_EN BIT(29)
  106. #define CPUREG_EN BIT(28)
  107. #define AUTONEG_CTL BIT(27)
  108. #define DATASPLIT BIT(26)
  109. #define LNKTRN_SIG_DET BIT(8)
  110. #define PLL_ENABLE_1P25G 0xe0000000
  111. #define LANE_CTRL_1P25G 0xf800f8c0
  112. #define XFM_FLUSH_CMD 0x00009c9c
  113. #define ANEG_LINK_CTL_10GKR_MASK GENMASK(21, 20)
  114. #define ANEG_LINK_CTL_1GKX_MASK GENMASK(17, 16)
  115. #define ANEG_LINK_CTL_1G10G_MASK \
  116. (ANEG_LINK_CTL_10GKR_MASK | ANEG_LINK_CTL_1GKX_MASK)
  117. #define ANEG_1G_10G_OPT_MASK GENMASK(7, 5)
  118. #define SERDES_REG_INDEX 0
  119. #define KSERDES_XFW_MEM_SIZE SZ_64K
  120. #define KSERDES_XFW_CONFIG_MEM_SIZE SZ_64
  121. #define KSERDES_XFW_NUM_PARAMS 5
  122. #define KSERDES_XFW_CONFIG_START_ADDR \
  123. (KSERDES_XFW_MEM_SIZE - KSERDES_XFW_CONFIG_MEM_SIZE)
  124. #define KSERDES_XFW_PARAM_START_ADDR \
  125. (KSERDES_XFW_MEM_SIZE - (KSERDES_XFW_NUM_PARAMS * 4))
  126. #define LANE_ENABLE(sc, n) ((sc)->lane[n].enable)
/* Supported SerDes link rates, expressed in kbps. */
enum kserdes_link_rate {
	KSERDES_LINK_RATE_1P25G = 1250000,
	KSERDES_LINK_RATE_3P125G = 3125000,
	KSERDES_LINK_RATE_4P9152G = 4915200,
	KSERDES_LINK_RATE_5G = 5000000,
	KSERDES_LINK_RATE_6P144G = 6144000,
	KSERDES_LINK_RATE_6P25G = 6250000,
	KSERDES_LINK_RATE_7P3728G = 7372800,
	KSERDES_LINK_RATE_9P8304G = 9830400,
	KSERDES_LINK_RATE_10G = 10000000,
	KSERDES_LINK_RATE_10P3125G = 10312500,
	KSERDES_LINK_RATE_12P5G = 12500000,
};

/* Per-lane rate selection relative to the PLL rate (see
 * _kserdes_set_lane_ctrl_rate() for the register encodings). */
enum kserdes_lane_ctrl_rate {
	KSERDES_FULL_RATE,
	KSERDES_HALF_RATE,
	KSERDES_QUARTER_RATE,
};

/* Peripheral types a SerDes instance can serve. */
enum kserdes_phy_type {
	KSERDES_PHY_SGMII,
	KSERDES_PHY_XGE,
	KSERDES_PHY_PCIE,
	KSERDES_PHY_HYPERLINK,
};
/* Per-lane TX FIR coefficients and drive settings. */
struct kserdes_tx_coeff {
	u32 c1;
	u32 c2;
	u32 cm;
	u32 att;	/* TX attenuation */
	u32 vreg;	/* TX regulator setting */
};

/* RX equalizer attenuation/boost pair. */
struct kserdes_equalizer {
	u32 att;
	u32 boost;
};

/* Static configuration for one SerDes lane. */
struct kserdes_lane_config {
	bool enable;
	u32 ctrl_rate;		/* an enum kserdes_lane_ctrl_rate value */
	struct kserdes_tx_coeff tx_coeff;
	struct kserdes_equalizer rx_start;	/* initial RX eq values */
	struct kserdes_equalizer rx_force;	/* forced RX eq values; used
						 * when rx_force_enable is set */
	bool loopback;
};

/* Parameters handed to the run-time firmware (see KSERDES_XFW_*). */
struct kserdes_fw_config {
	bool on;
	u32 rate;
	u32 link_loss_wait;
	u32 lane_seeds;
	u32 fast_train;
	u32 active_lane;
	u32 c1, c2, cm, attn, boost, dlpf, rxcal;
	u32 lane_config[KSERDES_MAX_LANES];
};

/* Delay and coefficient readout for one lane. */
struct kserdes_lane_dlev_out {
	u32 delay;
	int coef_vals[MAX_COEFS];
};

struct kserdes_dlev_out {
	struct kserdes_lane_dlev_out lane_dlev_out[KSERDES_MAX_LANES];
};

/* Offsets read back for one comparator and its five coefficients. */
struct kserdes_cmp_coef_ofs {
	u32 cmp;
	u32 coef1;
	u32 coef2;
	u32 coef3;
	u32 coef4;
	u32 coef5;
};

/* Comparator/coefficient offsets for one lane (per comparator). */
struct kserdes_lane_ofs {
	struct kserdes_cmp_coef_ofs ct_ofs[MAX_CMP];
};

/* Offsets for the whole SerDes (per lane). */
struct kserdes_ofs {
	struct kserdes_lane_ofs lane_ofs[KSERDES_MAX_LANES];
};
/*
 * All firmware file names end up here. List the firmware file names below.
 * Newest first. Search starts from the 0-th array entry until a firmware
 * file is found.
 */
static const char * const ks2_gbe_serdes_firmwares[] = {"ks2_gbe_serdes.bin"};
static const char * const ks2_xgbe_serdes_firmwares[] = {"ks2_xgbe_serdes.bin"};
static const char * const ks2_pcie_serdes_firmwares[] = {"ks2_pcie_serdes.bin"};

#define MAX_VERSION 64
/* Magic words bracketing the version string in the init-fw header. */
#define INIT_FW_MAGIC_1 0xfaceface
#define INIT_FW_MAGIC_2 0xcafecafe

/* NULL-terminated list of init-firmware versions this driver accepts. */
static char *compatible_init_fw_version[] = {
	"3.3.0.2c",
	NULL,
};

/* Header at the start of an init-firmware image. */
struct serdes_cfg_header {
	u32 magic_1;
	char version[MAX_VERSION];
	u32 magic_2;
};

/* One init-fw record: write @val into bits [msb:lsb] of register @ofs. */
struct serdes_cfg {
	u32 ofs;
	u32 msb;
	u32 lsb;
	u32 val;
};
/* Configuration and state for one SerDes instance (all lanes). */
struct kserdes_config {
	struct device *dev;
	enum kserdes_phy_type phy_type;
	u32 lanes;			/* number of lanes on this SerDes */
	void __iomem *regs;		/* SerDes MMIO base */
	struct regmap *peripheral_regmap;
	struct regmap *pcsr_regmap;
	const char *init_fw;		/* name of the init firmware in use */
	struct serdes_cfg *init_cfg;	/* init-fw payload (register writes) */
	int init_cfg_len;		/* payload length in bytes */
	enum kserdes_link_rate link_rate;
	bool rx_force_enable;		/* apply lane[].rx_force att/boost */
	struct kserdes_lane_config lane[KSERDES_MAX_LANES];
	struct kserdes_ofs sofs;
	/* NOTE(review): presumably selects the run-time (xfw) firmware
	 * mode — confirm against probe code. */
	bool firmware;
	struct kserdes_fw_config fw;
};

/* One PHY handle per lane. */
struct kserdes_phy {
	u32 lane;
	struct phy *phy;
};

/* Driver instance: one SerDes exposing up to KSERDES_MAX_LANES PHYs. */
struct kserdes_dev {
	struct device *dev;
	u32 nphys;			/* number of entries used in phys[] */
	struct kserdes_phy *phys[KSERDES_MAX_LANES];
	struct kserdes_config sc;
};
  254. static struct platform_device_id kserdes_devtype[] = {
  255. {
  256. .name = "kserdes-gbe",
  257. .driver_data = KSERDES_PHY_SGMII,
  258. }, {
  259. .name = "kserdes-xgbe",
  260. .driver_data = KSERDES_PHY_XGE,
  261. }, {
  262. .name = "kserdes-pcie",
  263. .driver_data = KSERDES_PHY_PCIE,
  264. }
  265. };
/*
 * Return the index of the next enabled lane after lane @i, or
 * sc->lanes when there is no further enabled lane.
 */
static inline int next_enable_lane(struct kserdes_config *sc, int i)
{
	int j = i;

	while (++j < sc->lanes) {
		if (sc->lane[j].enable)
			return j;
	}
	return j;
}

/* Iterate i over all lanes [0, sc->lanes). */
#define for_each_lane(sc, i) \
	for (i = 0; i < (sc)->lanes; i++)

/*
 * Iterate i over enabled lanes only. The comma expression in the loop
 * condition first advances i to the next enabled lane, then tests it.
 * Starting at -1 makes the first iteration find the first enabled lane
 * (callers that use an unsigned i rely on the wrap converting back to
 * -1 in next_enable_lane()'s int parameter).
 */
#define for_each_enable_lane(sc, i) \
	for (i = -1; i = next_enable_lane(sc, i), i < sc->lanes; )
/* 32-bit MMIO read at @base + @offset. */
static inline u32 kserdes_readl(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

/* 32-bit MMIO write of @value to @base + @offset. */
static inline void kserdes_writel(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}
  287. static void kserdes_do_config(void __iomem *base,
  288. struct serdes_cfg *cfg, u32 size)
  289. {
  290. u32 i;
  291. for (i = 0; i < size; i++)
  292. FINSR(base, cfg[i].ofs, cfg[i].msb, cfg[i].lsb, cfg[i].val);
  293. }
  294. static bool is_init_fw_compatible(struct kserdes_config *sc,
  295. struct serdes_cfg_header *hdr)
  296. {
  297. int i = 0;
  298. if ((hdr->magic_1 != INIT_FW_MAGIC_1) ||
  299. (hdr->magic_2 != INIT_FW_MAGIC_2)) {
  300. dev_err(sc->dev, "incompatible fw %s\n", sc->init_fw);
  301. return false;
  302. }
  303. while (compatible_init_fw_version[i]) {
  304. if (!strcmp(compatible_init_fw_version[i], hdr->version)) {
  305. dev_info(sc->dev, "init fw %s: version %s\n",
  306. sc->init_fw, hdr->version);
  307. return true;
  308. }
  309. ++i;
  310. }
  311. dev_err(sc->dev, "incompatible fw %s: version %s\n",
  312. sc->init_fw, hdr->version);
  313. return false;
  314. }
  315. static int kserdes_load_init_fw(struct kserdes_config *sc,
  316. const char * const *a_firmwares,
  317. int n_firmwares)
  318. {
  319. const struct firmware *fw;
  320. bool found = false;
  321. int ret, i;
  322. struct serdes_cfg_header hdr;
  323. int hdr_sz;
  324. for (i = 0; i < n_firmwares; i++) {
  325. if (a_firmwares[i]) {
  326. ret = request_firmware(&fw, a_firmwares[i], sc->dev);
  327. if (!ret) {
  328. found = true;
  329. break;
  330. }
  331. }
  332. }
  333. if (!found) {
  334. dev_err(sc->dev, "can't get any serdes init fw");
  335. return -ENODEV;
  336. }
  337. sc->init_fw = a_firmwares[i];
  338. memcpy((void *)&hdr, fw->data, sizeof(hdr));
  339. hdr_sz = sizeof(hdr);
  340. hdr.version[MAX_VERSION - 1] = 0;
  341. if (!is_init_fw_compatible(sc, &hdr))
  342. return -EINVAL;
  343. sc->init_cfg = devm_kzalloc(sc->dev, fw->size - hdr_sz, GFP_KERNEL);
  344. memcpy((void *)sc->init_cfg, fw->data + hdr_sz, fw->size - hdr_sz);
  345. sc->init_cfg_len = fw->size - hdr_sz;
  346. release_firmware(fw);
  347. kserdes_do_config(sc->regs, sc->init_cfg,
  348. sc->init_cfg_len / sizeof(struct serdes_cfg));
  349. return 0;
  350. }
/*
 * Read the 12-bit test-bus value. The register layout differs between
 * PHY-A and other SerDes versions (see PHY_A()).
 */
static inline u32 _kserdes_read_tbus_val(void __iomem *sregs)
{
	u32 tmp;

	if (PHY_A(sregs)) {
		/* PHY-A splits the 12 bits across two CMU0 registers. */
		tmp = ((kserdes_readl(sregs, CMU0_REG(0xec))) >> 24) & 0x0ff;
		tmp |= ((kserdes_readl(sregs, CMU0_REG(0xfc))) >> 16) & 0xf00;
	} else {
		tmp = ((kserdes_readl(sregs, CMU0_REG(0xf8))) >> 16) & 0xfff;
	}
	return tmp;
}

/*
 * Select the internal node driven onto the test bus: @select picks the
 * sub-block, @ofs the observation point within it. On non-four-lane
 * SerDes a nonzero @select is bumped by one — presumably the lane
 * select codes are spaced differently there (TODO confirm).
 */
static void _kserdes_write_tbus_addr(void __iomem *sregs, int select, int ofs)
{
	if (select && !FOUR_LANE(sregs))
		++select;

	if (PHY_A(sregs))
		FINSR(sregs, CMU0_REG(0x8), 31, 24, ((select << 5) + ofs));
	else
		FINSR(sregs, CMU0_REG(0xfc), 26, 16, ((select << 8) + ofs));
}

/* Convenience: select a test-bus node, then read back its value. */
static u32 _kserdes_read_select_tbus(void __iomem *sregs, int select, int ofs)
{
	_kserdes_write_tbus_addr(sregs, select, ofs);
	return _kserdes_read_tbus_val(sregs);
}
/*
 * Assert TX idle for @lane. Non-XGE parts additionally require LANEX
 * 0xb8 bits 17:16 to be set.
 */
static inline void kserdes_set_tx_idle(struct kserdes_config *sc, u32 lane)
{
	if (sc->phy_type != KSERDES_PHY_XGE)
		FINSR(sc->regs, LANEX_REG(lane, 0xb8), 17, 16, 3);

	FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 25, 24, 3);
	FINSR(sc->regs, LANEX_REG(lane, 0x28), 21, 20, 0);
}

/* Deassert TX idle for @lane (inverse of kserdes_set_tx_idle()). */
static inline void kserdes_clr_tx_idle(struct kserdes_config *sc, u32 lane)
{
	if (sc->phy_type != KSERDES_PHY_XGE)
		FINSR(sc->regs, LANEX_REG(lane, 0xb8), 17, 16, 0);

	FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 25, 24, 0);
	FINSR(sc->regs, LANEX_REG(lane, 0x28), 21, 20, 0);
}
/*
 * Capture lane state from the test bus (node lane+1, observation
 * offsets 0 and 1) and pack it into the lane override field, LANEX
 * 0x28 bits 29:15, with bit 14 set and bits 6:5 of the packed value
 * cleared.
 */
static void kserdes_set_lane_ov(struct kserdes_config *sc, u32 lane)
{
	u32 val_0, val_1, val;

	val_0 = _kserdes_read_select_tbus(sc->regs, lane + 1, 0);
	val_1 = _kserdes_read_select_tbus(sc->regs, lane + 1, 1);

	val = 0;
	val |= ((val_1 >> 9) & 0x3) << 1;
	val |= (val_0 & 0x3) << 3;
	val |= ((val_0 >> 2) & 0x1ff) << 5;
	val |= (1 << 14);
	val &= ~0x60;

	FINSR(sc->regs, LANEX_REG(lane, 0x028), 29, 15, val);
}

/* Assert reset: latch the override state of every enabled lane. */
static inline void kserdes_assert_reset(struct kserdes_config *sc)
{
	int lane;

	for_each_enable_lane(sc, lane)
		kserdes_set_lane_ov(sc, lane);
}
/*
 * Program the TX FIR coefficients c1/c2/cm for @lane. The field layout
 * differs by PHY type; on XGE, c2's MSB lives in LANEX 0x4 bit 18.
 */
static inline void kserdes_config_c1_c2_cm(struct kserdes_config *sc, u32 lane)
{
	u32 c1, c2, cm;

	c1 = sc->lane[lane].tx_coeff.c1;
	c2 = sc->lane[lane].tx_coeff.c2;
	cm = sc->lane[lane].tx_coeff.cm;

	if (sc->phy_type == KSERDES_PHY_XGE) {
		FINSR(sc->regs, LANEX_REG(lane, 0x8), 11, 8, (cm & 0xf));
		FINSR(sc->regs, LANEX_REG(lane, 0x8), 4, 0, (c1 & 0x1f));
		FINSR(sc->regs, LANEX_REG(lane, 0x8), 7, 5, (c2 & 0x7));
		/* 4th bit of c2 goes into a separate register on XGE */
		FINSR(sc->regs, LANEX_REG(lane, 0x4),
		      18, 18, ((c2 >> 3) & 0x1));
	} else {
		FINSR(sc->regs, LANEX_REG(lane, 0x8), 15, 12, (cm & 0xf));
		FINSR(sc->regs, LANEX_REG(lane, 0x8), 4, 0, (c1 & 0x1f));
		FINSR(sc->regs, LANEX_REG(lane, 0x8), 11, 8, (c2 & 0xf));
	}
}
/*
 * Force the RX equalizer attenuation/boost for @lane from the lane's
 * rx_force values. On XGE both fields are always written; on other PHY
 * types a value of -1 (stored in a u32, i.e. 0xffffffff) means "do not
 * force" and that field is skipped.
 */
static inline void kserdes_config_att_boost(struct kserdes_config *sc, u32 lane)
{
	u32 att, boost;

	att = sc->lane[lane].rx_force.att;
	boost = sc->lane[lane].rx_force.boost;

	if (sc->phy_type == KSERDES_PHY_XGE) {
		FINSR(sc->regs, LANEX_REG(lane, 0x98), 13, 13, 0);
		FINSR(sc->regs, LANEX_REG(lane, 0x8c), 15, 12, boost);
		FINSR(sc->regs, LANEX_REG(lane, 0x8c), 11, 8, att);
	} else {
		if (att != -1) {
			/* disable auto-adaptation of att before forcing */
			FINSR(sc->regs, CML_REG(0x84), 0, 0, 0);
			FINSR(sc->regs, CML_REG(0x8c), 24, 24, 0);
			FINSR(sc->regs, LANEX_REG(lane, 0x8c), 11, 8, att);
		}
		if (boost != -1) {
			FINSR(sc->regs, CML_REG(0x84), 1, 1, 0);
			FINSR(sc->regs, CML_REG(0x8c), 25, 25, 0);
			FINSR(sc->regs, LANEX_REG(lane, 0x8c), 15, 12, boost);
		}
	}
}
/*
 * Program the lane's TX attenuation and regulator fields (layout
 * differs by PHY type), then the c1/c2/cm coefficients, and finally
 * the forced RX att/boost when rx_force_enable is set.
 */
static void kserdes_set_tx_rx_fir_coeff(struct kserdes_config *sc, u32 lane)
{
	struct kserdes_tx_coeff *tc = &sc->lane[lane].tx_coeff;

	if (sc->phy_type == KSERDES_PHY_XGE) {
		FINSR(sc->regs, LANEX_REG(lane, 0x004), 29, 26, tc->att);
		FINSR(sc->regs, LANEX_REG(lane, 0x0a4), 2, 0, tc->vreg);
	} else {
		FINSR(sc->regs, LANEX_REG(lane, 0x004), 28, 25, tc->att);
		FINSR(sc->regs, LANEX_REG(lane, 0x084), 7, 5, tc->vreg);
	}

	kserdes_config_c1_c2_cm(sc, lane);

	if (sc->rx_force_enable)
		kserdes_config_att_boost(sc, lane);
}
/* Force the lane's signal-detect low (LANEX 0x004 bits 2:1 = 0x2). */
static inline void
_kserdes_force_signal_detect_low(void __iomem *sregs, u32 lane)
{
	FINSR(sregs, LANEX_REG(lane, 0x004), 2, 1, 0x2);
}

/* Wrapper taking the driver config instead of the raw register base. */
static inline void
kserdes_force_signal_detect_low(struct kserdes_config *sc, u32 lane)
{
	_kserdes_force_signal_detect_low(sc->regs, lane);
}

/* Release the forced signal-detect (LANEX 0x004 bits 2:1 = 0x0). */
static inline void
_kserdes_force_signal_detect_high(void __iomem *sregs, u32 lane)
{
	FINSR(sregs, LANEX_REG(lane, 0x004), 2, 1, 0x0);
}

/* Wrapper taking the driver config instead of the raw register base. */
static inline void
kserdes_force_signal_detect_high(struct kserdes_config *sc, u32 lane)
{
	_kserdes_force_signal_detect_high(sc->regs, lane);
}
/*
 * Wait (up to 500 ms) for every enabled lane's OK bit in the CML 0x1f8
 * status register. Each lane's bit sits at BIT(ofs + lane), where ofs
 * is 28 on four-lane SerDes and 29 otherwise.
 *
 * Returns 0 when all enabled lanes are OK, -ETIMEDOUT on timeout.
 */
static int kserdes_deassert_reset_poll_others(struct kserdes_config *sc)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned long time_check;
	u32 lanes_not_ok = 0;
	u32 ofs = 28;
	u32 ret, i;

	/* start with every enabled lane marked "not yet OK" */
	for_each_enable_lane(sc, i)
		lanes_not_ok |= BIT(i);

	if (!FOUR_LANE(sc->regs))
		ofs = 29;

	do {
		/* sample the clock before reading status so a scheduling
		 * gap between the two cannot cause a false timeout */
		time_check = jiffies;
		for_each_enable_lane(sc, i) {
			if (!(lanes_not_ok & (1 << i)))
				continue;

			ret = kserdes_readl(sc->regs, CML_REG(0x1f8));

			if (ret & BIT(ofs + i))
				lanes_not_ok &= ~BIT(i);
		}

		if (!lanes_not_ok)
			return 0;

		if (time_after(time_check, timeout))
			return -ETIMEDOUT;

		cpu_relax();
	} while (true);
}
/*
 * PCIe variant of the post-reset poll: a lane is OK when bit 4 of its
 * test-bus status word (node lane+1, offset 0x02) reads back clear.
 *
 * Returns 0 when all enabled lanes are OK, -ETIMEDOUT after 500 ms.
 */
static int kserdes_deassert_reset_poll_pcie(struct kserdes_config *sc)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned long time_check;
	u32 lanes_not_ok = 0;
	u32 ret, i;

	/* start with every enabled lane marked "not yet OK" */
	for_each_enable_lane(sc, i)
		lanes_not_ok |= (1 << i);

	do {
		/* sample the clock before reading status so a scheduling
		 * gap between the two cannot cause a false timeout */
		time_check = jiffies;
		for_each_enable_lane(sc, i) {
			if (!(lanes_not_ok & BIT(i)))
				continue;

			ret = _kserdes_read_select_tbus(sc->regs, i + 1, 0x02);

			if (!(ret & BIT(4)))
				lanes_not_ok &= ~BIT(i);
		}

		if (!lanes_not_ok)
			return 0;

		if (time_after(time_check, timeout))
			return -ETIMEDOUT;

		cpu_relax();
	} while (true);
}
/* Drive the per-lane reset bit (LANEX 0x28 bit 29); @reset nonzero asserts. */
static inline void _kserdes_lane_reset(void __iomem *serdes,
				       u32 lane, u32 reset)
{
	FINSR(serdes, LANEX_REG(lane, 0x28), 29, 29, !!reset);
}

/* Take @lane out of reset; XGE parts need LANEX 0x60 bit 0 set first. */
static inline void kserdes_release_reset(struct kserdes_config *sc, u32 lane)
{
	if (sc->phy_type == KSERDES_PHY_XGE)
		FINSR(sc->regs, LANEX_REG(lane, 0x60), 0, 0, 0x1);

	_kserdes_lane_reset(sc->regs, lane, 0);
}
  545. static int kserdes_deassert_reset(struct kserdes_config *sc, u32 poll)
  546. {
  547. int ret = 0, lane;
  548. for_each_enable_lane(sc, lane)
  549. kserdes_release_reset(sc, lane);
  550. if (!poll)
  551. goto done;
  552. if (sc->phy_type == KSERDES_PHY_PCIE)
  553. ret = kserdes_deassert_reset_poll_pcie(sc);
  554. else
  555. ret = kserdes_deassert_reset_poll_others(sc);
  556. done:
  557. return ret;
  558. }
/* Enable @lane: set control fields 31:29 and 15:13 of its ctrl/status reg. */
static inline void _kserdes_lane_enable(void __iomem *sregs, u32 lane)
{
	FINSR(sregs, LANE_CTRL_STS_REG(lane), 31, 29, 0x7);
	FINSR(sregs, LANE_CTRL_STS_REG(lane), 15, 13, 0x7);
}
  564. static inline int _kserdes_set_lane_ctrl_rate(void __iomem *sregs, u32 lane,
  565. enum kserdes_lane_ctrl_rate rate)
  566. {
  567. u32 rate_mode;
  568. switch (rate) {
  569. case KSERDES_FULL_RATE:
  570. rate_mode = 0x4;
  571. break;
  572. case KSERDES_QUARTER_RATE:
  573. rate_mode = 0x6;
  574. break;
  575. case KSERDES_HALF_RATE:
  576. rate_mode = 0x5;
  577. break;
  578. default:
  579. return -EINVAL;
  580. }
  581. FINSR(sregs, LANE_CTRL_STS_REG(lane), 28, 26, rate_mode);
  582. FINSR(sregs, LANE_CTRL_STS_REG(lane), 12, 10, rate_mode);
  583. return 0;
  584. }
/* Put @lane in loopback; XGE parts use a different register recipe. */
static inline void _kserdes_set_lane_loopback(void __iomem *sregs, u32 lane,
					      enum kserdes_phy_type phy_type)
{
	if (phy_type == KSERDES_PHY_XGE) {
		FINSR(sregs, LANEX_REG(lane, 0x0), 7, 0, 0x4);
		FINSR(sregs, LANEX_REG(lane, 0x4), 2, 1, 0x3);
	} else {
		FINSR(sregs, LANEX_REG(lane, 0x0), 31, 24, 0x40);
	}
}

/* Clear the global wait-after bits (PLL_CTRL 17:16). */
static inline void _kserdes_clear_wait_after(void __iomem *sregs)
{
	FINSR(sregs, PLL_CTRL_REG, 17, 16, 0);
}

/* Set the per-lane wait-after bits (PLL_CTRL lane+12 and lane+4). */
static inline void _kserdes_clear_lane_wait_after(void __iomem *sregs, u32 lane)
{
	FINSR(sregs, PLL_CTRL_REG, lane + 12, lane + 12, 1);
	FINSR(sregs, PLL_CTRL_REG, lane + 4, lane + 4, 1);
}

/* Enable the main PLL (PLL_CTRL 31:29 = 0x7). */
static inline void _kserdes_pll_enable(void __iomem *sregs)
{
	FINSR(sregs, PLL_CTRL_REG, 31, 29, 0x7);
}

/* Enable the second PLL (PLL_CTRL 27:25 = 0x7); its status is only
 * consulted for XGE elsewhere in this driver. */
static inline void _kserdes_pll2_enable(void __iomem *sregs)
{
	FINSR(sregs, PLL_CTRL_REG, 27, 25, 0x7);
}

/* Disable the main PLL (PLL_CTRL 31:29 = 0x4). */
static inline void _kserdes_pll_disable(void __iomem *sregs)
{
	FINSR(sregs, PLL_CTRL_REG, 31, 29, 0x4);
}

/* Disable the second PLL (PLL_CTRL 27:25 = 0x4). */
static inline void _kserdes_pll2_disable(void __iomem *sregs)
{
	FINSR(sregs, PLL_CTRL_REG, 27, 25, 0x4);
}

/* Main PLL status bit (PLL_CTRL bit 28). */
static inline u32 _kserdes_get_pll_status(void __iomem *sregs)
{
	return FEXTR(kserdes_readl(sregs, PLL_CTRL_REG), 28, 28);
}

/* Second PLL status bit (PLL_CTRL bit 24). */
static inline u32 _kserdes_get_pll2_status(void __iomem *sregs)
{
	return FEXTR(kserdes_readl(sregs, PLL_CTRL_REG), 24, 24);
}
/* Lane loopback enable via LANEX reg 0 bits 31:24 (non-XGE recipe). */
static inline void kserdes_lane_enable_loopback(void __iomem *serdes, u32 lane)
{
	FINSR(serdes, LANEX_REG(lane, 0), 31, 24, 0x40);
}

/*
 * Per-lane status bit in PLL_CTRL: bit @lane for PCIe, bit (@lane + 8)
 * for all other PHY types.
 */
static inline u32 _kserdes_get_lane_status(void __iomem *sregs, u32 lane,
					   enum kserdes_phy_type phy_type)
{
	int d = ((phy_type == KSERDES_PHY_PCIE) ? 0 : 8);

	return FEXTR(kserdes_readl(sregs, PLL_CTRL_REG), lane + d, lane + d);
}

/*
 * Combined readiness check: main PLL status, second PLL status on XGE,
 * and the status bit of every enabled lane. Returns nonzero only when
 * all of those read 1.
 */
static u32 kserdes_get_pll_lanes_status(struct kserdes_config *sc)
{
	u32 val, i;

	val = _kserdes_get_pll_status(sc->regs);
	if (!val)
		goto done;

	if (sc->phy_type == KSERDES_PHY_XGE) {
		val = _kserdes_get_pll2_status(sc->regs);
		if (!val)
			goto done;
	}

	/* i is u32: for_each_enable_lane starts it at -1, which wraps,
	 * but next_enable_lane() takes an int so the value converts back */
	for_each_enable_lane(sc, i)
		val &= _kserdes_get_lane_status(sc->regs, i, sc->phy_type);

done:
	return val;
}
  654. static int kserdes_get_status(struct kserdes_config *sc)
  655. {
  656. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  657. unsigned long time_check;
  658. do {
  659. time_check = jiffies;
  660. if (kserdes_get_pll_lanes_status(sc))
  661. break;
  662. if (time_after(time_check, timeout))
  663. return -ETIMEDOUT;
  664. cpu_relax();
  665. } while (true);
  666. return 0;
  667. }
/*
 * Read the TX termination trim (low 8 bits) over the test bus.
 * The TBUS offset differs between XGE (0x1a) and other PHYs (0x1b).
 */
static inline u32 _kserdes_get_tx_termination(void __iomem *sregs,
					      enum kserdes_phy_type phy_type)
{
	return (_kserdes_read_select_tbus(sregs, 1,
					  ((phy_type == KSERDES_PHY_XGE) ?
					   0x1a : 0x1b)) & 0xff);
}
/*
 * Program the TX termination value on every lane (bits 31:24 of lane
 * reg 0x7c) and set the termination-override enable bit (bit 20).
 */
static void kserdes_set_tx_terminations(struct kserdes_config *sc, u32 term)
{
	int i;

	for_each_lane(sc, i) {
		FINSR(sc->regs, LANEX_REG(i, 0x7c), 31, 24, term);
		FINSR(sc->regs, LANEX_REG(i, 0x7c), 20, 20, 0x1);
	}
}
/*
 * Read one comparator/coefficient offset set for an XGE lane over the
 * test bus. Each TBUS word is selected via CMU0 reg 0xfc and then read;
 * coefficient values straddle word boundaries, so adjacent reads are
 * combined (high bits shifted in from one word, low bits from the next).
 * NOTE(review): the 0x11 select is issued twice on purpose — the same
 * word carries both the cmp field and the upper coef1 bits; confirm
 * against the SerDes TBUS map before changing.
 */
static void
_kserdes_write_ofs_xge(void __iomem *sregs, u32 lane, u32 cmp,
		       struct kserdes_cmp_coef_ofs *ofs)
{
	FINSR(sregs, CML_REG(0x8c), 23, 21, cmp);
	FINSR(sregs, CMU0_REG(0xfc), 26, 16, ((lane + 2) << 8) + 0x11);
	ofs->cmp = (_kserdes_read_tbus_val(sregs) & 0x0ff0) >> 4;
	FINSR(sregs, CMU0_REG(0xfc), 26, 16, ((lane + 2) << 8) + 0x11);
	ofs->coef1 = (_kserdes_read_tbus_val(sregs) & 0x000f) << 3;
	FINSR(sregs, CMU0_REG(0xfc), 26, 16, ((lane + 2) << 8) + 0x12);
	ofs->coef1 |= (_kserdes_read_tbus_val(sregs) & 0x0e00) >> 9;
	ofs->coef2 = (_kserdes_read_tbus_val(sregs) & 0x01f8) >> 3;
	ofs->coef3 = (_kserdes_read_tbus_val(sregs) & 0x0007) << 3;
	FINSR(sregs, CMU0_REG(0xfc), 26, 16, ((lane + 2) << 8) + 0x13);
	ofs->coef3 |= (_kserdes_read_tbus_val(sregs) & 0x0e00) >> 9;
	ofs->coef4 = (_kserdes_read_tbus_val(sregs) & 0x01f8) >> 3;
	ofs->coef5 = (_kserdes_read_tbus_val(sregs) & 0x0007) << 3;
	FINSR(sregs, CMU0_REG(0xfc), 26, 16, ((lane + 2) << 8) + 0x14);
	ofs->coef5 |= (_kserdes_read_tbus_val(sregs) & 0x0e00) >> 9;
}
/*
 * Sample one XGE offset set per enabled lane/comparator and accumulate
 * the values into *sofs. Called repeatedly by kserdes_get_average_ofs(),
 * which later divides the sums by the sample count.
 */
static void kserdes_add_ofs_xge(struct kserdes_config *sc,
				struct kserdes_ofs *sofs)
{
	struct kserdes_cmp_coef_ofs *ctofs;
	struct kserdes_cmp_coef_ofs sample;
	struct kserdes_lane_ofs *lofs;
	u32 lane, cmp;

	for_each_enable_lane(sc, lane) {
		lofs = &sofs->lane_ofs[lane];
		for_each_cmp(cmp) {
			ctofs = &lofs->ct_ofs[cmp];
			_kserdes_write_ofs_xge(sc->regs, lane, cmp, &sample);
			ctofs->cmp += sample.cmp;
			ctofs->coef1 += sample.coef1;
			ctofs->coef2 += sample.coef2;
			ctofs->coef3 += sample.coef3;
			ctofs->coef4 += sample.coef4;
			ctofs->coef5 += sample.coef5;
		}
	}
}
/*
 * Read just the comparator offset for a non-XGE lane over the test bus
 * (non-XGE PHYs track only the cmp field, no coefficients).
 */
static void
kserdes_get_cmp_coef_ofs_non_xge(void __iomem *sregs, u32 lane, u32 cmp,
				 struct kserdes_cmp_coef_ofs *ofs)
{
	FINSR(sregs, CML_REG(0x8c), 23, 21, cmp);
	FINSR(sregs, CMU0_REG(0x8), 31, 24, ((lane + 1) << 5) + 0x12);
	ofs->cmp = (_kserdes_read_tbus_val(sregs) & 0x0ff0) >> 4;
}
  732. static void kserdes_add_ofs_non_xge(struct kserdes_config *sc,
  733. struct kserdes_ofs *sofs)
  734. {
  735. struct kserdes_cmp_coef_ofs *ctofs;
  736. struct kserdes_cmp_coef_ofs sample;
  737. struct kserdes_lane_ofs *lofs;
  738. u32 lane, cmp;
  739. for_each_enable_lane(sc, lane) {
  740. lofs = &sofs->lane_ofs[lane];
  741. for_each_cmp(cmp) {
  742. ctofs = &lofs->ct_ofs[cmp];
  743. kserdes_get_cmp_coef_ofs_non_xge(sc->regs, lane,
  744. cmp, &sample);
  745. ctofs->cmp += sample.cmp;
  746. }
  747. }
  748. }
  749. static void kserdes_get_average_ofs(struct kserdes_config *sc, u32 samples,
  750. struct kserdes_ofs *sofs)
  751. {
  752. struct kserdes_cmp_coef_ofs *ctofs;
  753. struct kserdes_lane_ofs *lofs;
  754. u32 i, lane, cmp;
  755. int ret;
  756. memset(sofs, 0, sizeof(*sofs));
  757. for (i = 0; i < samples; i++) {
  758. kserdes_assert_reset(sc);
  759. ret = kserdes_deassert_reset(sc, 1);
  760. if (ret) {
  761. dev_err(sc->dev,
  762. "kserdes_get_average_ofs: reset failed %d\n",
  763. ret);
  764. return;
  765. }
  766. if (sc->phy_type == KSERDES_PHY_XGE)
  767. kserdes_add_ofs_xge(sc, sofs);
  768. else
  769. kserdes_add_ofs_non_xge(sc, sofs);
  770. }
  771. for_each_enable_lane(sc, lane) {
  772. lofs = &sofs->lane_ofs[lane];
  773. for_each_cmp(cmp) {
  774. ctofs = &lofs->ct_ofs[cmp];
  775. if (sc->phy_type == KSERDES_PHY_XGE) {
  776. ctofs->cmp /= samples;
  777. ctofs->coef1 /= samples;
  778. ctofs->coef2 /= samples;
  779. ctofs->coef3 /= samples;
  780. ctofs->coef4 /= samples;
  781. ctofs->coef5 /= samples;
  782. } else {
  783. ctofs->cmp /= samples;
  784. }
  785. }
  786. }
  787. }
/*
 * Write one comparator/coefficient offset set into the PHY-A side
 * registers for a lane. The sequence selects the lane and comparator,
 * raises the override enables, programs the values, pulses the latch
 * bit (lane reg 0x2c bit 10), then drops the enables — order matters.
 */
static void _kserdes_set_ofs(void __iomem *sregs, u32 lane, u32 cmp,
			     struct kserdes_cmp_coef_ofs *ofs)
{
	FINSR(sregs, CML_REG(0xf0), 27, 26, (lane + 1));
	FINSR(sregs, CML_REG(0x98), 24, 24, 0x1);
	FINSR(sregs, LANEX_REG(lane, 0x2c), 2, 2, 0x1);
	FINSR(sregs, LANEX_REG(lane, 0x30), 7, 5, cmp);
	FINSR(sregs, LANEX_REG(lane, 0x5c), 31, 31, 0x1);
	FINSR(sregs, CML_REG(0x9c), 7, 0, ofs->cmp);
	FINSR(sregs, LANEX_REG(lane, 0x58), 30, 24, ofs->coef1);
	FINSR(sregs, LANEX_REG(lane, 0x5c), 5, 0, ofs->coef2);
	FINSR(sregs, LANEX_REG(lane, 0x5c), 13, 8, ofs->coef3);
	FINSR(sregs, LANEX_REG(lane, 0x5c), 21, 16, ofs->coef4);
	FINSR(sregs, LANEX_REG(lane, 0x5c), 29, 24, ofs->coef5);
	/* Latch pulse: values take effect on this 1 -> 0 transition. */
	FINSR(sregs, LANEX_REG(lane, 0x2c), 10, 10, 0x1);
	FINSR(sregs, LANEX_REG(lane, 0x2c), 10, 10, 0x0);
	FINSR(sregs, CML_REG(0x98), 24, 24, 0x0);
	FINSR(sregs, LANEX_REG(lane, 0x2c), 2, 2, 0x0);
	FINSR(sregs, LANEX_REG(lane, 0x5c), 31, 31, 0x0);
}
/*
 * Write one comparator offset on the PHY-B side: select the comparator
 * (one-hot in lane reg 0x4c bits 5:2), program the value, and pulse the
 * commit bit (lane reg 0x48 bit 29).
 */
static inline void _kserdes_set_cmp_ofs_phyb(void __iomem *sregs, u32 lane,
					     u32 cmp, u32 cmp_ofs)
{
	FINSR(sregs, LANEX_REG(lane, 0x58), 18, 18, 0x1);
	FINSR(sregs, LANEX_REG(lane, 0x4c), 5, 2, (0x1 << (cmp - 1)));
	FINSR(sregs, LANEX_REG(lane, 0x48), 24, 17, cmp_ofs);
	FINSR(sregs, LANEX_REG(lane, 0x48), 29, 29, 0x1);
	FINSR(sregs, LANEX_REG(lane, 0x48), 29, 29, 0x0);
	FINSR(sregs, LANEX_REG(lane, 0x58), 18, 18, 0x0);
}
/*
 * Write one coefficient offset on the PHY-B side.
 * @coef:  1-based coefficient index, one-hot encoded into bits 23:19
 * @width: field width in bits of the coefficient value
 * The commit bit (lane reg 0x48 bit 29) is pulsed to apply the value.
 */
static inline void _kserdes_set_coef_ofs(void __iomem *sregs, u32 lane,
					 u32 coef, u32 width, u32 coef_ofs)
{
	FINSR(sregs, LANEX_REG(lane, 0x58), 23, 19, (0x1 << (coef - 1)));
	FINSR(sregs, LANEX_REG(lane, 0x48), 17 + (width - 1), 17, coef_ofs);
	FINSR(sregs, LANEX_REG(lane, 0x48), 29, 29, 0x1);
	FINSR(sregs, LANEX_REG(lane, 0x48), 29, 29, 0x0);
}
/*
 * Program a full offset set (cmp + coef1..5) on the PHY-B side for one
 * lane/comparator. Override enables are raised first and dropped last.
 * coef1 is 7 bits wide; coef2..5 are 6 bits.
 */
static void _kserdes_set_ofs_phyb(void __iomem *sregs, u32 lane, u32 cmp,
				  struct kserdes_cmp_coef_ofs *ofs)
{
	FINSR(sregs, LANEX_REG(lane, 0x58), 16, 16, 0x1);
	FINSR(sregs, LANEX_REG(lane, 0x48), 16, 16, 0x1);
	_kserdes_set_cmp_ofs_phyb(sregs, lane, cmp, ofs->cmp);
	FINSR(sregs, LANEX_REG(lane, 0x58), 17, 17, 0x1);
	_kserdes_set_coef_ofs(sregs, lane, 1, 7, ofs->coef1);
	_kserdes_set_coef_ofs(sregs, lane, 2, 6, ofs->coef2);
	_kserdes_set_coef_ofs(sregs, lane, 3, 6, ofs->coef3);
	_kserdes_set_coef_ofs(sregs, lane, 4, 6, ofs->coef4);
	_kserdes_set_coef_ofs(sregs, lane, 5, 6, ofs->coef5);
	FINSR(sregs, LANEX_REG(lane, 0x58), 16, 16, 0x0);
	FINSR(sregs, LANEX_REG(lane, 0x48), 16, 16, 0x0);
	FINSR(sregs, LANEX_REG(lane, 0x58), 18, 18, 0x0);
	FINSR(sregs, LANEX_REG(lane, 0x58), 17, 17, 0x0);
}
  843. static void kserdes_set_ofs_xge(struct kserdes_config *sc,
  844. struct kserdes_ofs *sofs)
  845. {
  846. struct kserdes_cmp_coef_ofs *ctofs;
  847. struct kserdes_lane_ofs *lofs;
  848. int lane, cmp;
  849. for_each_enable_lane(sc, lane) {
  850. lofs = &sofs->lane_ofs[lane];
  851. for_each_cmp(cmp) {
  852. ctofs = &lofs->ct_ofs[cmp];
  853. _kserdes_set_ofs(sc->regs, lane, cmp, ctofs);
  854. _kserdes_set_ofs_phyb(sc->regs, lane, cmp, ctofs);
  855. }
  856. }
  857. }
  858. static void kserdes_set_ofs_non_xge(struct kserdes_config *sc,
  859. struct kserdes_ofs *sofs)
  860. {
  861. struct kserdes_cmp_coef_ofs *ctofs;
  862. struct kserdes_lane_ofs *lofs;
  863. u32 lane, cmp;
  864. for_each_enable_lane(sc, lane) {
  865. lofs = &sofs->lane_ofs[lane];
  866. for_each_cmp(cmp) {
  867. ctofs = &lofs->ct_ofs[cmp];
  868. _kserdes_set_ofs(sc->regs, lane, cmp, ctofs);
  869. }
  870. }
  871. }
  872. static void kserdes_set_average_ofs(struct kserdes_config *sc,
  873. struct kserdes_ofs *sofs)
  874. {
  875. if (sc->phy_type == KSERDES_PHY_XGE)
  876. kserdes_set_ofs_xge(sc, sofs);
  877. else
  878. kserdes_set_ofs_non_xge(sc, sofs);
  879. }
/*
 * PHY-B (XGE) init: with signal detect forced low on all enabled lanes,
 * measure averaged offsets over OFFSET_SAMPLES reset cycles, program
 * them, then release signal detect. The short sleeps let the analog
 * blocks settle between steps.
 */
static void kserdes_phyb_init_config(struct kserdes_config *sc,
				     struct kserdes_ofs *sofs)
{
	int lane;

	for_each_enable_lane(sc, lane)
		kserdes_force_signal_detect_low(sc, lane);
	usleep_range(10, 20);
	kserdes_get_average_ofs(sc, OFFSET_SAMPLES, sofs);
	kserdes_set_average_ofs(sc, sofs);
	usleep_range(10, 20);
	for_each_enable_lane(sc, lane)
		kserdes_force_signal_detect_high(sc, lane);
	usleep_range(10, 20);
}
  894. static int kserdes_wait_lane_rx_valid(struct kserdes_config *sc, u32 lane)
  895. {
  896. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  897. unsigned long time_check;
  898. u32 status;
  899. do {
  900. time_check = jiffies;
  901. status = _kserdes_read_select_tbus(sc->regs, lane + 1, 0x02);
  902. if (status & 0x20)
  903. return 0;
  904. if (time_after(time_check, timeout))
  905. return -ETIMEDOUT;
  906. cpu_relax();
  907. } while (true);
  908. }
/* Pulse the CPU soft-reset bit (CPU_CTRL bit 29) with settle delays. */
static inline void _kserdes_reset(void __iomem *sregs)
{
	FINSR(sregs, CPU_CTRL_REG, 29, 29, 0x1);
	usleep_range(10, 20);
	FINSR(sregs, CPU_CTRL_REG, 29, 29, 0x0);
	usleep_range(10, 20);
}
/*
 * Enable the PLL(s) for an XGE PHY. Without firmware, lane clocking is
 * first enabled in CML reg 0. 10.3125G needs both PLLs; 1.25G uses a
 * single direct write of PLL_ENABLE_1P25G. Other link rates are left
 * untouched here.
 */
static inline void kserdes_xge_pll_enable(struct kserdes_config *sc)
{
	if (!sc->firmware)
		FINSR(sc->regs, CML_REG(0), 7, 0, 0x1f);

	if (sc->link_rate == KSERDES_LINK_RATE_10P3125G) {
		_kserdes_pll_enable(sc->regs);
		_kserdes_pll2_enable(sc->regs);
	} else if (sc->link_rate == KSERDES_LINK_RATE_1P25G) {
		kserdes_writel(sc->regs, PLL_CTRL_REG, PLL_ENABLE_1P25G);
	}
}
/* Set the per-port enable bit in the peripheral's XGE control register. */
static inline void _kserdes_enable_xgmii_port(struct regmap *peripheral_regmap,
					      u32 port)
{
	regmap_update_bits(peripheral_regmap, XGE_CTRL_OFFSET,
			   GENMASK(port, port), BIT(port));
}
/* Recover a lane's receiver by toggling forced signal detect low/high. */
static inline void _kserdes_reset_rx(void __iomem *sregs, int lane)
{
	_kserdes_force_signal_detect_low(sregs, lane);
	usleep_range(1000, 2000);
	_kserdes_force_signal_detect_high(sregs, lane);
}
/*
 * Evaluate link state of every lane selected in @lanes_chk_mask and run
 * a small per-lane state machine:
 *   0 = down:      on (no signal loss && block lock) -> up; on no block
 *                  lock -> RX reset to retrain.
 *   1 = up:        losing block lock moves to state 2 (suspect).
 *   2 = suspect:   lock regained -> up; still unlocked -> RX reset and
 *                  back to down.
 * Any PCSR block errors clear the lock indication and trigger an error
 * counter reset via PCSR_RX_CTL. @lanes_up_mask is updated per lane.
 *
 * Returns 1 if every checked lane is up, 0 if any is down, or a
 * negative regmap error code.
 * NOTE(review): callers that test the result with `if (ret)` will treat
 * a negative error the same as link-up — verify at call sites.
 */
static int kserdes_check_link_status(struct kserdes_config *sc,
				     u32 *current_lane_state,
				     u32 lanes_chk_mask,
				     u32 *lanes_up_mask)
{
	struct device *dev = sc->dev;
	u32 pcsr_rx_stat, blk_lock, blk_errs;
	int loss, i, link_up = 1;
	int ret;
	unsigned long lmask = (unsigned long)lanes_chk_mask;

	for_each_set_bit(i, &lmask, 8) {
		/* Bit 0 of the lane control/status reg = signal loss. */
		loss = (kserdes_readl(sc->regs, LANE_CTRL_STS_REG(i))) & 0x01;
		ret = regmap_read(sc->pcsr_regmap, PCSR_RX_STATUS(i),
				  &pcsr_rx_stat);
		if (ret)
			return ret;
		blk_lock = (pcsr_rx_stat >> 30) & 0x1;
		blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;
		/* Any block errors invalidate the lock indication. */
		if (blk_errs)
			blk_lock = 0;
		switch (current_lane_state[i]) {
		case 0:
			if (!loss && blk_lock) {
				dev_dbg(dev, "XGE PCSR Linked Lane: %d\n", i);
				FINSR(sc->regs, LANEX_REG(i, 0x04), 2, 1, 0);
				current_lane_state[i] = 1;
			} else if (!blk_lock) {
				dev_dbg(dev,
					"XGE PCSR Recover Lane: %d\n", i);
				_kserdes_reset_rx(sc->regs, i);
			}
			break;
		case 1:
			if (!blk_lock)
				current_lane_state[i] = 2;
			break;
		case 2:
			if (blk_lock) {
				current_lane_state[i] = 1;
			} else {
				_kserdes_reset_rx(sc->regs, i);
				current_lane_state[i] = 0;
			}
			break;
		default:
			dev_info(dev,
				 "XGE: unknown current_lane_state[%d] %d\n",
				 i, current_lane_state[i]);
			break;
		}
		/* Clear the PCSR error counter by pulsing the reset value. */
		if (blk_errs) {
			regmap_update_bits(sc->pcsr_regmap, PCSR_RX_CTL(i),
					   GENMASK(7, 0), 0x19);
			regmap_update_bits(sc->pcsr_regmap, PCSR_RX_CTL(i),
					   GENMASK(7, 0), 0x00);
		}
		if (current_lane_state[i] == 1) {
			*lanes_up_mask |= BIT(i);
		} else {
			*lanes_up_mask &= ~BIT(i);
			link_up = 0;
		}
	}
	return link_up;
}
  1004. static int kserdes_wait_link_up(struct kserdes_config *sc,
  1005. u32 lanes_chk_mask,
  1006. u32 *lanes_up_mask)
  1007. {
  1008. u32 current_state[KSERDES_MAX_LANES];
  1009. unsigned long time_check = 0;
  1010. int i, link_up, ret = 0;
  1011. memset(current_state, 0, sizeof(current_state));
  1012. do {
  1013. usleep_range(10000, 20000);
  1014. link_up = kserdes_check_link_status(sc, current_state,
  1015. lanes_chk_mask,
  1016. lanes_up_mask);
  1017. if (link_up)
  1018. break;
  1019. for_each_enable_lane(sc, i) {
  1020. if (!(*lanes_up_mask & BIT(i))) {
  1021. dev_dbg(sc->dev,
  1022. "XGE: detected lane %d down\n", i);
  1023. }
  1024. }
  1025. if (++time_check >= 200) {
  1026. ret = -ETIMEDOUT;
  1027. break;
  1028. }
  1029. } while (1);
  1030. return ret;
  1031. }
/*
 * Capture the trained lane parameters (TX FIR taps cm/c1/c2, RX attn,
 * boost, DLPF, rxcal) into sc->fw for later firmware download.
 * PHY-A and PHY-B pack the TX coefficients differently, hence the two
 * decode branches and the differing TBUS offsets (0x11 vs 0x10).
 */
static inline void kserdes_xfw_get_lane_params(struct kserdes_config *sc,
					       int lane)
{
	struct kserdes_fw_config *fw = &sc->fw;
	u32 tx_ctrl, val_0, val_1;
	u32 phy_a = PHY_A(sc->regs);

	val_0 = kserdes_readl(sc->regs, LANEX_REG(lane, 0x04));
	val_1 = kserdes_readl(sc->regs, LANEX_REG(lane, 0x08));
	/* Repack the raw register fields into a single tx_ctrl word. */
	tx_ctrl = ((((val_0 >> 18) & 0x1) << 24) |
		   (((val_1 >> 0) & 0xffff) << 8) |
		   (((val_0 >> 24) & 0xff) << 0));
	if (phy_a) {
		fw->cm = (val_1 >> 12) & 0xf;
		fw->c1 = (val_1 >> 0) & 0x1f;
		fw->c2 = (val_1 >> 8) & 0xf;
	} else {
		fw->cm = (tx_ctrl >> 16) & 0xf;
		fw->c1 = (tx_ctrl >> 8) & 0x1f;
		fw->c2 = (tx_ctrl >> 13) & 0x7;
		/* c2 bit 3 lives separately at tx_ctrl bit 24 on PHY-B. */
		fw->c2 = fw->c2 | (((tx_ctrl >> 24) & 0x1) << 3);
	}
	val_0 = _kserdes_read_select_tbus(sc->regs, lane + 1,
					 (phy_a ? 0x11 : 0x10));
	fw->attn = (val_0 >> 4) & 0xf;
	fw->boost = (val_0 >> 8) & 0xf;
	val_0 = _kserdes_read_select_tbus(sc->regs, lane + 1, 0x5);
	fw->dlpf = (val_0 >> 2) & 0x3ff;
	val_0 = _kserdes_read_select_tbus(sc->regs, lane + 1, 0x6);
	fw->rxcal = (val_0 >> 3) & 0xff;
}
/*
 * Initialize the firmware configuration area in SerDes memory: zero the
 * config region, then write the flush command, fast-train flag, lane
 * seeds and the packed per-lane config byte string via the
 * auto-incrementing data register.
 */
static inline void kserdes_xfw_mem_init(struct kserdes_config *sc)
{
	struct kserdes_fw_config *fw = &sc->fw;
	u32 i, lane_config = 0;

	/* Pack one config byte per lane, MSB-first, with a trailing
	 * zero byte from the final shift.
	 */
	for_each_lane(sc, i)
		lane_config = (lane_config << 8) | (fw->lane_config[i] & 0xff);
	lane_config <<= 8;

	kserdes_writel(sc->regs, MEM_ADR_REG, KSERDES_XFW_CONFIG_START_ADDR);
	for (i = KSERDES_XFW_CONFIG_START_ADDR;
	     i < KSERDES_XFW_PARAM_START_ADDR; i += 4)
		kserdes_writel(sc->regs, MEM_DATINC_REG, 0x00000000);
	kserdes_writel(sc->regs, MEM_DATINC_REG, XFM_FLUSH_CMD);
	kserdes_writel(sc->regs, MEM_DATINC_REG, fw->fast_train);
	kserdes_writel(sc->regs, MEM_DATINC_REG, 0x00000000);
	kserdes_writel(sc->regs, MEM_DATINC_REG, fw->lane_seeds);
	kserdes_writel(sc->regs, MEM_DATINC_REG, lane_config);
}
  1079. static int kserdes_pcie_lanes_enable(struct kserdes_config *sc)
  1080. {
  1081. int ret, i;
  1082. u32 lanes_enable = 0;
  1083. for_each_enable_lane(sc, i)
  1084. lanes_enable |= BIT(i);
  1085. for_each_lane(sc, i) {
  1086. kserdes_release_reset(sc, i);
  1087. if (sc->lane[i].loopback)
  1088. _kserdes_set_lane_loopback(sc->regs, i, sc->phy_type);
  1089. }
  1090. ret = kserdes_get_status(sc);
  1091. if (ret)
  1092. return ret;
  1093. return lanes_enable;
  1094. }
  1095. static void kserdes_clear_wait_after(struct kserdes_config *sc,
  1096. unsigned long lanes_mask)
  1097. {
  1098. u32 lane;
  1099. if (!sc->rx_force_enable) {
  1100. for_each_set_bit(lane, &lanes_mask, 8) {
  1101. if (!LANE_ENABLE(sc, lane))
  1102. continue;
  1103. _kserdes_clear_lane_wait_after(sc->regs, lane);
  1104. }
  1105. } else {
  1106. _kserdes_clear_wait_after(sc->regs);
  1107. }
  1108. }
  1109. static int kserdes_check_lanes_status(struct kserdes_config *sc)
  1110. {
  1111. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  1112. unsigned long time_check;
  1113. u32 val, i;
  1114. do {
  1115. time_check = jiffies;
  1116. val = 1;
  1117. for_each_enable_lane(sc, i)
  1118. val &= _kserdes_get_lane_status(sc->regs, i,
  1119. sc->phy_type);
  1120. if (val)
  1121. break;
  1122. if (time_after(time_check, timeout))
  1123. return -ETIMEDOUT;
  1124. cpu_relax();
  1125. } while (true);
  1126. return 0;
  1127. }
/*
 * PHY-A init for one lane: for each odd comparator, read the raw coef1
 * offset over the test bus, derive coef1val (offset - 14) with the
 * other coefficients fixed at 31, then write the set to both the PHY-A
 * registers (with latch pulse) and the PHY-B override path.
 * NOTE(review): coef2val is written with field 6:0 here while
 * _kserdes_set_ofs() uses 5:0 for coef2 — confirm against the register
 * map whether the extra bit is intentional.
 */
static void kserdes_phya_init_config(struct kserdes_config *sc, u32 lane)
{
	u32 coef1val, coef2val, coef3val, coef4val, coef5val;
	void __iomem *sregs = sc->regs;
	u32 cmp, coef1_ofs;

	for_each_cmp(cmp) {
		/* Only odd comparators are initialized here. */
		if (!(cmp & 0x1))
			continue;
		FINSR(sregs, CML_REG(0x8c), 23, 21, cmp);
		FINSR(sregs, CMU0_REG(0x8), 31, 24, ((lane + 1) << 5) + 0x12);
		coef1_ofs = (_kserdes_read_tbus_val(sregs) & 0x000f) << 3;
		FINSR(sregs, CMU0_REG(0x8), 31, 24, ((lane + 1) << 5) + 0x13);
		coef1_ofs |= (_kserdes_read_tbus_val(sregs) & 0x0e00) >> 9;
		coef1val = coef1_ofs - 14;
		coef2val = 31;
		coef3val = 31;
		coef4val = 31;
		coef5val = 31;
		FINSR(sregs, CML_REG(0xf0), 27, 26, lane + 1);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 2, 2, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x30), 7, 5, cmp);
		FINSR(sregs, LANEX_REG(lane, 0x5c), 31, 31, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x58), 30, 24, coef1val);
		FINSR(sregs, LANEX_REG(lane, 0x5c), 6, 0, coef2val);
		FINSR(sregs, LANEX_REG(lane, 0x5c), 13, 8, coef3val);
		FINSR(sregs, LANEX_REG(lane, 0x5c), 21, 16, coef4val);
		FINSR(sregs, LANEX_REG(lane, 0x5c), 29, 24, coef5val);
		/* Latch pulse to apply the programmed values. */
		FINSR(sregs, LANEX_REG(lane, 0x2c), 10, 10, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 10, 10, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 2, 2, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x5c), 31, 31, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x58), 16, 16, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x48), 16, 16, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x4c), 5, 2, (0x1 << (cmp - 1)));
		FINSR(sregs, LANEX_REG(lane, 0x58), 17, 17, 0x1);
		_kserdes_set_coef_ofs(sregs, lane, 1, 7, coef1val);
		_kserdes_set_coef_ofs(sregs, lane, 2, 6, coef2val);
		_kserdes_set_coef_ofs(sregs, lane, 3, 6, coef3val);
		_kserdes_set_coef_ofs(sregs, lane, 4, 6, coef4val);
		_kserdes_set_coef_ofs(sregs, lane, 5, 6, coef5val);
		FINSR(sregs, LANEX_REG(lane, 0x58), 16, 16, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x48), 16, 16, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x58), 17, 17, 0x0);
	}
}
  1173. static int kserdes_check_pll_status(struct kserdes_config *sc)
  1174. {
  1175. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  1176. unsigned long time_check;
  1177. u32 val;
  1178. do {
  1179. time_check = jiffies;
  1180. val = _kserdes_get_pll_status(sc->regs);
  1181. if (sc->phy_type == KSERDES_PHY_XGE)
  1182. val &= _kserdes_get_pll2_status(sc->regs);
  1183. if (val)
  1184. break;
  1185. if (time_after(time_check, timeout))
  1186. return -ETIMEDOUT;
  1187. cpu_relax();
  1188. } while (true);
  1189. return 0;
  1190. }
/*
 * Apply the per-lane control rate and the PHY-type specific lane
 * control/status settings, then loopback if requested. Non-XGE PHYs
 * additionally get the lane reg 0x30 trim at the end. Errors from
 * setting the rate are logged and the rest of the sequence skipped.
 */
static void kserdes_enable_common_set_lane_rate(struct kserdes_config *sc,
						u32 lane)
{
	int ret;

	ret = _kserdes_set_lane_ctrl_rate(sc->regs, lane,
					  sc->lane[lane].ctrl_rate);
	if (ret) {
		dev_err(sc->dev, "set_lane_rate FAILED: lane = %d err = %d\n",
			lane, ret);
		return;
	}
	switch (sc->phy_type) {
	case KSERDES_PHY_SGMII:
		FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 7, 6, 0x3);
		FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 23, 21, 0x4);
		FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 5, 3, 0x4);
		break;
	case KSERDES_PHY_XGE:
		FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 23, 21, 0x7);
		FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 5, 3, 0x7);
		if (sc->link_rate == KSERDES_LINK_RATE_10P3125G) {
			FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 16, 16, 0x1);
			FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 19, 19, 0x1);
		}
		break;
	case KSERDES_PHY_PCIE:
		/* PCIe needs no extra lane control programming here. */
		break;
	default:
		FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 23, 21, 0x6);
		FINSR(sc->regs, LANE_CTRL_STS_REG(lane), 5, 3, 0x6);
		break;
	}
	if (sc->lane[lane].loopback)
		_kserdes_set_lane_loopback(sc->regs, lane, sc->phy_type);
	if (sc->phy_type != KSERDES_PHY_XGE) {
		FINSR(sc->regs, LANEX_REG(lane, 0x30), 11, 11, 0x1);
		FINSR(sc->regs, LANEX_REG(lane, 0x30), 13, 12, 0x0);
	}
}
/*
 * Seed the RX adaptation starting points: the configured att/boost pair
 * is written into three register fields (lane reg 0x8c plus two fields
 * of 0x84) that the adaptation engine reads.
 */
static inline void kserdes_set_lane_rx_starts(struct kserdes_config *sc,
					      u32 lane)
{
	FINSR(sc->regs, LANEX_REG(lane, 0x8c), 11, 8,
	      sc->lane[lane].rx_start.att);
	FINSR(sc->regs, LANEX_REG(lane, 0x8c), 15, 12,
	      sc->lane[lane].rx_start.boost);
	FINSR(sc->regs, LANEX_REG(lane, 0x84), 27, 24,
	      sc->lane[lane].rx_start.att);
	FINSR(sc->regs, LANEX_REG(lane, 0x84), 31, 28,
	      sc->lane[lane].rx_start.boost);
	FINSR(sc->regs, LANEX_REG(lane, 0x84), 19, 16,
	      sc->lane[lane].rx_start.att);
	FINSR(sc->regs, LANEX_REG(lane, 0x84), 23, 20,
	      sc->lane[lane].rx_start.boost);
}
/*
 * High-speed init: apply the >= 9.8304G trims (CML 0xbc for non-XGE),
 * idle all enabled TX lanes, and for high rates either force signal
 * detect low with the lane 0x78 trim (non-XGE) or set CML 0x10c (XGE).
 */
static void kserdes_hs_init_config(struct kserdes_config *sc)
{
	int i;

	if (sc->phy_type != KSERDES_PHY_XGE) {
		if (sc->link_rate >= KSERDES_LINK_RATE_9P8304G)
			FINSR(sc->regs, CML_REG(0xbc), 28, 24, 0x1e);
	}

	for_each_enable_lane(sc, i)
		kserdes_set_tx_idle(sc, i);

	if (sc->link_rate >= KSERDES_LINK_RATE_9P8304G) {
		if (sc->phy_type != KSERDES_PHY_XGE) {
			for_each_enable_lane(sc, i)
				kserdes_force_signal_detect_low(sc, i);
			for_each_enable_lane(sc, i)
				FINSR(sc->regs, LANEX_REG(i, 0x78),
				      30, 24, 0x7f);
		} else {
			FINSR(sc->regs, CML_REG(0x10c), 7, 0, 0xff);
		}
	}
}
/*
 * Common lane bring-up sequence. All lanes are temporarily marked
 * enabled (the original enable mask is saved in lane_mask and restored
 * at the end) so the whole macro reset/config path covers every lane.
 * Order: HS init -> RX starts -> reset + FIR coefficients -> deassert
 * reset -> lane rates -> PLL enable -> wait PLL -> lane enable -> wait
 * lanes -> TX termination -> PHY-A/PHY-B offset init -> clear TX idle.
 * PCIe is not implemented here and returns -EINVAL.
 * Returns 0 on success or a negative error code.
 */
static int kserdes_lanes_enable_common(struct kserdes_config *sc,
				       struct kserdes_ofs *sofs)
{
	u32 val, lane_mask = 0;
	int i, ret;

	/* Save the caller's enable mask, then enable everything. */
	for_each_lane(sc, i) {
		if (sc->lane[i].enable)
			lane_mask |= BIT(i);
		else
			sc->lane[i].enable = 1;
	}

	if (sc->phy_type == KSERDES_PHY_PCIE) {
		dev_err(sc->dev, "kserdes_lanes_enable_common: pcie TBD.\n");
		return -EINVAL;
	}

	kserdes_hs_init_config(sc);

	for_each_enable_lane(sc, i)
		kserdes_set_lane_rx_starts(sc, i);

	kserdes_assert_reset(sc);

	for_each_enable_lane(sc, i)
		kserdes_set_tx_rx_fir_coeff(sc, i);

	for_each_enable_lane(sc, i)
		kserdes_force_signal_detect_low(sc, i);

	ret = kserdes_deassert_reset(sc, 0);
	if (ret) {
		dev_err(sc->dev, "kserdes_deassert_reset FAILED %d\n", ret);
		return ret;
	}

	for_each_enable_lane(sc, i)
		kserdes_enable_common_set_lane_rate(sc, i);

	if (sc->phy_type == KSERDES_PHY_XGE)
		kserdes_xge_pll_enable(sc);
	else
		_kserdes_pll_enable(sc->regs);

	ret = kserdes_check_pll_status(sc);
	if (ret) {
		dev_err(sc->dev,
			"common init: check pll status FAILED %d\n", ret);
		return ret;
	}

	for_each_enable_lane(sc, i)
		_kserdes_lane_enable(sc->regs, i);

	ret = kserdes_check_lanes_status(sc);
	if (ret) {
		dev_err(sc->dev,
			"common init: check lanes status FAILED %d\n", ret);
		return ret;
	}

	usleep_range(5, 10);

	/* Measure once on lane 1 TBUS and apply to all lanes. */
	val = _kserdes_get_tx_termination(sc->regs, sc->phy_type);
	kserdes_set_tx_terminations(sc, val);

	if (sc->phy_type == KSERDES_PHY_XGE)
		kserdes_phyb_init_config(sc, sofs);
	else if (sc->link_rate >= KSERDES_LINK_RATE_9P8304G)
		for_each_enable_lane(sc, i)
			kserdes_phya_init_config(sc, i);

	for_each_enable_lane(sc, i)
		kserdes_clr_tx_idle(sc, i);

	/* Restore the caller's per-lane enable flags. */
	for_each_lane(sc, i)
		sc->lane[i].enable = (lane_mask & BIT(i)) >> i;

	return 0;
}
/* Return the lane's signal-detect bit (PLL_CTRL bit @lane). */
static inline u32 _kserdes_get_lane_sd(void __iomem *sregs, u32 lane)
{
	return FEXTR(kserdes_readl(sregs, PLL_CTRL_REG), lane, lane);
}
  1333. static int _kserdes_wait_lane_sd(void __iomem *sregs, u32 lane)
  1334. {
  1335. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  1336. unsigned long time_check;
  1337. do {
  1338. time_check = jiffies;
  1339. if (_kserdes_get_lane_sd(sregs, lane))
  1340. break;
  1341. if (time_after(time_check, timeout))
  1342. return -ETIMEDOUT;
  1343. cpu_relax();
  1344. } while (true);
  1345. return 0;
  1346. }
/*
 * Re-run the RX boost adaptation for one lane with attenuation pinned
 * at its currently adapted value. If the re-adaptation still yields a
 * zero boost, manually nudge the boost state machine via lane reg 0x2c.
 * Register offsets differ between XGE (PHY-B) and other PHYs.
 * The original att start value is restored before re-enabling the
 * normal adaptation path.
 */
static void kserdes_rx_att_boost_config_phyb(struct kserdes_config *sc,
					     u32 lane)
{
	u32 tbus_ofs, rxeq_init_reg_ofs, rxeq_ln_reg_ofs, rxeq_ln_force_bit;
	void __iomem *sregs = sc->regs;
	u32 att_start, att_read, boost_read;
	int ret;

	if (sc->phy_type == KSERDES_PHY_XGE) {
		tbus_ofs = 0x10;
		rxeq_init_reg_ofs = 0x9c;
		rxeq_ln_reg_ofs = 0x98;
		rxeq_ln_force_bit = 14;
	} else {
		tbus_ofs = 0x11;
		rxeq_init_reg_ofs = 0x84;
		rxeq_ln_reg_ofs = 0xac;
		rxeq_ln_force_bit = 11;
	}

	/* Save the configured att start, then pin att at the adapted value. */
	att_start = kserdes_readl(sregs, LANEX_REG(lane, 0x8c));
	att_start = (att_start >> 8) & 0xf;
	att_read = _kserdes_read_select_tbus(sregs, lane + 1, tbus_ofs);
	att_read = (att_read >> 4) & 0xf;
	FINSR(sregs, LANEX_REG(lane, 0x8c), 11, 8, att_read);

	/* Disable att adaptation and force one rxeq pass (pulse). */
	FINSR(sregs, LANEX_REG(lane, rxeq_init_reg_ofs), 0, 0, 0x0);
	FINSR(sregs, CML_REG(0x8c), 24, 24, 0x0);
	FINSR(sregs, LANEX_REG(lane, rxeq_ln_reg_ofs),
	      rxeq_ln_force_bit, rxeq_ln_force_bit, 0x1);
	FINSR(sregs, LANEX_REG(lane, rxeq_ln_reg_ofs),
	      rxeq_ln_force_bit, rxeq_ln_force_bit, 0x0);

	ret = kserdes_wait_lane_rx_valid(sc, lane);
	if (ret) {
		dev_dbg(sc->dev, "kserdes_wait_lane_rx_valid %d FAILED: %d\n",
			lane, ret);
	}
	usleep_range(300, 600);

	boost_read = _kserdes_read_select_tbus(sregs, lane + 1, tbus_ofs);
	boost_read = (boost_read >> 8) & 0xf;
	if (!boost_read) {
		/* Boost stuck at 0: step it once via the override path. */
		FINSR(sregs, LANEX_REG(lane, 0x2c), 2, 2, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 18, 12, 0x2);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 9, 3, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 10, 10, 0x1);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 10, 10, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 2, 2, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 18, 12, 0x0);
		FINSR(sregs, LANEX_REG(lane, 0x2c), 9, 3, 0x0);
	}

	/* Restore att start and re-enable normal adaptation. */
	FINSR(sregs, LANEX_REG(lane, 0x8c), 11, 8, att_start);
	FINSR(sregs, LANEX_REG(lane, rxeq_init_reg_ofs), 0, 0, 0x1);
	FINSR(sregs, CML_REG(0x8c), 24, 24, 0x1);
}
/*
 * Program the DLEV adaptation pattern, retrigger adaptation on the
 * lane, and compute an average DLEV value from the positive/negative
 * readings biased by the comparator-4 offset.
 * Returns the average on success or a negative error code if the lane
 * never reports RX valid.
 * NOTE(review): dlevp/dlevn/dlevavg are u32, so the subtractions can
 * wrap if the readings straddle the cmp value; the result is then
 * returned through an int. Confirm expected value ranges.
 */
static int kserdes_set_dlev_patt_adapt(struct kserdes_config *sc,
				       u32 lane, u32 pattern,
				       struct kserdes_lane_ofs *lofs)
{
	struct kserdes_cmp_coef_ofs *ctofs = &lofs->ct_ofs[4];
	void __iomem *sregs = sc->regs;
	u32 dlevp, dlevn, dlevavg;
	int ret;

	FINSR(sregs, CML_REG(0x158), 14, 8, pattern);
	/* Pulse the adaptation-restart bit. */
	FINSR(sregs, LANEX_REG(lane, 0x98), 14, 14, 1);
	FINSR(sregs, LANEX_REG(lane, 0x98), 14, 14, 0);
	ret = kserdes_wait_lane_rx_valid(sc, lane);
	if (ret) {
		dev_dbg(sc->dev,
			"set dlev patt: wait_lane_rx_valid FAILED %d\n", ret);
		return ret;
	}
	dlevp = _kserdes_read_select_tbus(sregs, lane + 1, 0x44);
	dlevp = (dlevp >> 4) & 0xff;
	dlevn = _kserdes_read_select_tbus(sregs, lane + 1, 0x45);
	dlevn &= 0xff;
	/* Bias by cmp when it sits outside the [121, 133] mid band. */
	if (ctofs->cmp <= 120)
		dlevavg = ctofs->cmp - dlevn;
	else if (ctofs->cmp >= 134)
		dlevavg = dlevp - ctofs->cmp;
	else
		dlevavg = (dlevp - dlevn) / 2;
	return dlevavg;
}
  1427. static u32 kserdes_eye_monitor_dll_ovr(struct kserdes_config *sc,
  1428. u32 lane, u32 phase_num, u32 t_offset,
  1429. u32 phase_shift)
  1430. {
  1431. void __iomem *sregs = sc->regs;
  1432. u32 tbus_data, delay, partial_eye, i;
  1433. u32 start_bin = 0, end_bin = 0;
  1434. u32 eye_scan_errors_array[128];
  1435. u32 error_free = 0, val_0, val_1;
  1436. u32 max_dly = 128;
  1437. bool not_phy_xge = (sc->phy_type != KSERDES_PHY_XGE);
  1438. if (t_offset == 0)
  1439. t_offset++;
  1440. if (phase_num == 1) {
  1441. val_0 = 0x00400000;
  1442. val_1 = 0x00000011;
  1443. } else {
  1444. val_0 = 0x00800000;
  1445. val_1 = 0x00000009;
  1446. }
  1447. reg_rmw(sregs + LANEX_REG(lane, 0x2c), val_0, GENMASK(23, 22));
  1448. reg_rmw(sregs + LANEX_REG(lane, 0x30), val_1, GENMASK(4, 0));
  1449. reg_rmw(sregs + CML_REG(0xb8), 0x00004000, GENMASK(15, 8));
  1450. if (phase_num == 1)
  1451. val_0 = 0xffef0000;
  1452. else
  1453. val_0 = 0xfff60000;
  1454. reg_rmw(sregs + CML_REG(0xb8), val_0, GENMASK(31, 16));
  1455. reg_rmw(sregs + CML_REG(0xbc), 0x000fffff, GENMASK(19, 0));
  1456. tbus_data = _kserdes_read_select_tbus(sregs, lane + 1, 0x02);
  1457. tbus_data = tbus_data;
  1458. usleep_range(250, 500);
  1459. for (i = 0; i < max_dly; i = i + t_offset) {
  1460. reg_rmw(sregs + LANEX_REG(lane, 0x2c),
  1461. (i & 0xff) << 24, GENMASK(31, 24));
  1462. reg_rmw(sregs + LANEX_REG(lane, 0x30),
  1463. phase_shift, GENMASK(1, 0));
  1464. usleep_range(5, 10);
  1465. reg_rmw(sregs + CML_REG(0xb8), 0x0000c000, GENMASK(15, 8));
  1466. usleep_range(500, 1000);
  1467. val_0 = _kserdes_read_select_tbus(sregs, lane + 1,
  1468. not_phy_xge ? 0x1a : 0x19);
  1469. val_0 <<= 4;
  1470. val_1 = _kserdes_read_select_tbus(sregs, lane + 1,
  1471. not_phy_xge ? 0x1b : 0x1a);
  1472. val_1 = (val_1 >> 8) & 0xf;
  1473. eye_scan_errors_array[i] = (val_0 | val_1);
  1474. reg_rmw(sregs + CML_REG(0xb8), 0x00004000, GENMASK(15, 8));
  1475. }
  1476. partial_eye = 0;
  1477. error_free = 0;
  1478. for (i = 0; i < max_dly; i = i + t_offset) {
  1479. if (i == 0) {
  1480. if (eye_scan_errors_array[i] < 16384)
  1481. partial_eye = 1;
  1482. } else {
  1483. if (eye_scan_errors_array[i] > 16384 + 3000)
  1484. partial_eye = 0;
  1485. }
  1486. if ((eye_scan_errors_array[i] < 16384) &&
  1487. (partial_eye == 0) &&
  1488. (error_free == 0)) {
  1489. if (!((eye_scan_errors_array[i - 1] > 16384) &&
  1490. (eye_scan_errors_array[i + 1] > 16384))) {
  1491. error_free = 1;
  1492. start_bin = i;
  1493. }
  1494. } else if ((eye_scan_errors_array[i] > 16384) &&
  1495. (partial_eye == 0) &&
  1496. (error_free == 1)) {
  1497. if (!((eye_scan_errors_array[i - 1] < 16384) &&
  1498. (eye_scan_errors_array[i + 1] < 16384))) {
  1499. end_bin = i;
  1500. break;
  1501. }
  1502. }
  1503. }
  1504. delay = (end_bin - start_bin) / 4 + start_bin;
  1505. reg_rmw(sregs + LANEX_REG(lane, 0x30), 0x00000000, GENMASK(7, 0));
  1506. reg_rmw(sregs + LANEX_REG(lane, 0x2c), 0x00000003, GENMASK(7, 0));
  1507. reg_rmw(sregs + CML_REG(0x98), 0x00000000, GENMASK(31, 0));
  1508. reg_rmw(sregs + CML_REG(0xb8), 0x00000000, GENMASK(15, 14));
  1509. reg_rmw(sregs + LANEX_REG(lane, 0x2c), 0x00000000, GENMASK(23, 22));
  1510. return delay;
  1511. }
/*
 * kserdes_rx_calibration_phyb() - RX calibration for a 10GE (PHY-B) lane.
 * @sc:     serdes configuration
 * @lane:   lane to calibrate
 * @lofs:   previously captured per-comparator coefficient offsets
 * @ldlevo: output: measured delay override and derived coefficient values
 *
 * Runs the eye monitor to obtain a delay override, programs it into the
 * CML delay registers, runs dlev adaptation for six test patterns and
 * derives coefficient corrections from the pattern averages.  The
 * corrections are applied on top of the stored offsets for comparators 1
 * and 3 only; comparators 2 and 4 keep their original offsets.  The
 * lane's att/boost override fields are forced to the live test-bus values
 * for the duration of the adaptation and restored afterwards.
 */
static void kserdes_rx_calibration_phyb(struct kserdes_config *sc, u32 lane,
					struct kserdes_lane_ofs *lofs,
					struct kserdes_lane_dlev_out *ldlevo)
{
	struct kserdes_cmp_coef_ofs *ctofs, *ctofs_temp;
	struct kserdes_lane_ofs lofs_temp;
	void __iomem *sregs = sc->regs;
	u32 att, boost, comp_no, att_start, boost_start;
	u32 delay_ovr = 0;
	int dlevavg_temp[6];

	/* Measure the eye and report the chosen delay to the caller. */
	delay_ovr = kserdes_eye_monitor_dll_ovr(sc, lane, 0, 1, 0);
	ldlevo->delay = delay_ovr;

	/*
	 * Program the delay override: the biased value (128 + delay) is
	 * split across CML 0x164[31:26] (low six bits) and 0x168[2:0]
	 * (upper bits).  Bits 15/16 of 0x164 presumably enable the
	 * override mode -- TODO confirm against the SerDes TRM.
	 */
	FINSR(sregs, CML_REG(0x164), 15, 15, 1);
	FINSR(sregs, CML_REG(0x164), 16, 16, 1);
	FINSR(sregs, CML_REG(0x164), 31, 26, (128 + delay_ovr) & 0x3f);
	FINSR(sregs, CML_REG(0x168), 2, 0, (128 + delay_ovr) >> 6);

	/* Clear LANEX 0x9c bits 1:0 for the calibration; re-set at exit. */
	FINSR(sregs, LANEX_REG(lane, 0x9c), 1, 1, 0);
	FINSR(sregs, LANEX_REG(lane, 0x9c), 0, 0, 0);

	/*
	 * Save the current att/boost override fields of LANEX 0x8c, then
	 * overwrite them with the adapted values read from the test bus so
	 * the dlev adaptation below runs against the live settings.
	 */
	att_start = (kserdes_readl(sregs, LANEX_REG(lane, 0x8c)) >> 8) & 0xf;
	boost_start = (kserdes_readl(sregs, LANEX_REG(lane, 0x8c)) >> 12) & 0xf;
	att = _kserdes_read_select_tbus(sregs, lane + 1, 0x10);
	boost = (att >> 8) & 0xf;
	att = (att >> 4) & 0xf;
	FINSR(sregs, LANEX_REG(lane, 0x8c), 11, 8, att);
	FINSR(sregs, LANEX_REG(lane, 0x8c), 15, 12, boost);

	/* Average dlev adaptation results for six bit patterns. */
	dlevavg_temp[0] = kserdes_set_dlev_patt_adapt(sc, lane, 0x71, lofs);
	dlevavg_temp[1] = kserdes_set_dlev_patt_adapt(sc, lane, 0x61, lofs);
	dlevavg_temp[2] = kserdes_set_dlev_patt_adapt(sc, lane, 0x79, lofs);
	dlevavg_temp[3] = kserdes_set_dlev_patt_adapt(sc, lane, 0x75, lofs);
	dlevavg_temp[4] = kserdes_set_dlev_patt_adapt(sc, lane, 0x73, lofs);
	dlevavg_temp[5] = kserdes_set_dlev_patt_adapt(sc, lane, 0x70, lofs);

	/* Coefficient corrections are half the pattern deltas (coefs 1..3
	 * use the opposite sign); coef 0 is additionally scaled by 2/3. */
	ldlevo->coef_vals[0] = (dlevavg_temp[0] - dlevavg_temp[1]) / 2;
	ldlevo->coef_vals[1] = (dlevavg_temp[0] - dlevavg_temp[2]) / -2;
	ldlevo->coef_vals[2] = (dlevavg_temp[0] - dlevavg_temp[3]) / -2;
	ldlevo->coef_vals[3] = (dlevavg_temp[0] - dlevavg_temp[4]) / -2;
	ldlevo->coef_vals[4] = (dlevavg_temp[0] - dlevavg_temp[5]) / 2;
	ldlevo->coef_vals[0] = ldlevo->coef_vals[0] -
				ldlevo->coef_vals[0] / 3;

	/* Apply the corrections to comparators 1 and 3; comparators 2 and
	 * 4 are rewritten with their unchanged offsets. */
	for (comp_no = 1; comp_no < 5; comp_no++) {
		ctofs = &lofs->ct_ofs[comp_no];
		ctofs_temp = &lofs_temp.ct_ofs[comp_no];
		ctofs_temp->cmp = ctofs->cmp;
		if ((comp_no == 1) || (comp_no == 3)) {
			ctofs_temp->coef1 = ldlevo->coef_vals[0] + ctofs->coef1;
			ctofs_temp->coef2 = ldlevo->coef_vals[1] + ctofs->coef2;
			ctofs_temp->coef3 = ldlevo->coef_vals[2] + ctofs->coef3;
			ctofs_temp->coef4 = ldlevo->coef_vals[3] + ctofs->coef4;
			ctofs_temp->coef5 = ldlevo->coef_vals[4] + ctofs->coef5;
		} else {
			ctofs_temp->coef1 = ctofs->coef1;
			ctofs_temp->coef2 = ctofs->coef2;
			ctofs_temp->coef3 = ctofs->coef3;
			ctofs_temp->coef4 = ctofs->coef4;
			ctofs_temp->coef5 = ctofs->coef5;
		}
		_kserdes_set_ofs(sregs, lane, comp_no, ctofs_temp);
	}

	/* Restore the saved att/boost overrides and LANEX 0x9c bits 1:0. */
	FINSR(sregs, LANEX_REG(lane, 0x8c), 11, 8, att_start);
	FINSR(sregs, LANEX_REG(lane, 0x8c), 15, 12, boost_start);
	FINSR(sregs, LANEX_REG(lane, 0x9c), 1, 1, 1);
	FINSR(sregs, LANEX_REG(lane, 0x9c), 0, 0, 1);
}
/*
 * kserdes_rx_boost_config_phya() - ensure RX boost adapted non-zero on a
 * PHY-A (non-10GE) lane.
 * @sc:   serdes configuration
 * @lane: lane to check
 *
 * Waits for the lane's RX to become valid, reads the adapted boost field
 * from the test bus and, if it came out as zero, performs a LANEX 0x2c
 * write/pulse sequence that presumably forces a minimum boost value into
 * the adaptation logic -- TODO confirm exact semantics against the TRM.
 *
 * Return: 0 on success, negative error if RX never became valid.
 */
static int kserdes_rx_boost_config_phya(struct kserdes_config *sc, u32 lane)
{
	u32 boost_read;
	int ret;
	bool phy_xge = (sc->phy_type == KSERDES_PHY_XGE);

	ret = kserdes_wait_lane_rx_valid(sc, lane);
	if (ret) {
		dev_err(sc->dev,
			"config_phya: wait_lane_rx_valid FAILED %d\n", ret);
		return ret;
	}

	/* Test-bus offset for the att/boost word differs per PHY type. */
	boost_read = _kserdes_read_select_tbus(sc->regs, lane + 1,
					       phy_xge ? 0x10 : 0x11);
	boost_read = (boost_read >> 8) & 0xf;

	if (!boost_read) {
		/* Set override fields, pulse bit 10 to latch, then clear. */
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 2, 2, 0x1);
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 18, 12, 0x2);
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 9, 3, 0x1);
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 10, 10, 0x1);
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 10, 10, 0x0);
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 2, 2, 0x0);
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 18, 12, 0x0);
		FINSR(sc->regs, LANEX_REG(lane, 0x2c), 9, 3, 0x0);
	}
	return 0;
}
  1600. static int kserdes_enable_lane_rx(struct kserdes_config *sc, u32 lane,
  1601. struct kserdes_lane_ofs *lofs,
  1602. struct kserdes_lane_dlev_out *ldlevo)
  1603. {
  1604. int ret = 0;
  1605. _kserdes_force_signal_detect_high(sc->regs, lane);
  1606. if (!sc->rx_force_enable) {
  1607. ret = _kserdes_wait_lane_sd(sc->regs, lane);
  1608. if (ret) {
  1609. dev_err(sc->dev,
  1610. "init_lane_rx wait sd valid FAILED %d\n", ret);
  1611. return ret;
  1612. }
  1613. if ((sc->phy_type == KSERDES_PHY_XGE) ||
  1614. (sc->link_rate >= KSERDES_LINK_RATE_5G)) {
  1615. if (sc->lane[lane].ctrl_rate == KSERDES_FULL_RATE) {
  1616. ret = kserdes_wait_lane_rx_valid(sc, lane);
  1617. if (ret) {
  1618. dev_err(sc->dev,
  1619. "init_lane_rx wait rx valid FAILED %d\n",
  1620. ret);
  1621. return ret;
  1622. }
  1623. }
  1624. }
  1625. if (sc->phy_type == KSERDES_PHY_XGE) {
  1626. kserdes_rx_att_boost_config_phyb(sc, lane);
  1627. } else if ((sc->link_rate >= KSERDES_LINK_RATE_5G) &&
  1628. (sc->lane[lane].ctrl_rate == KSERDES_FULL_RATE)) {
  1629. kserdes_rx_boost_config_phya(sc, lane);
  1630. }
  1631. }
  1632. if (sc->phy_type == KSERDES_PHY_XGE)
  1633. kserdes_rx_calibration_phyb(sc, lane, lofs, ldlevo);
  1634. return ret;
  1635. }
  1636. static int kserdes_recover_lane_rx(struct kserdes_config *sc, u32 lane,
  1637. struct kserdes_lane_ofs *lofs,
  1638. struct kserdes_lane_dlev_out *ldlevo)
  1639. {
  1640. int ret;
  1641. _kserdes_force_signal_detect_high(sc->regs, lane);
  1642. if (!sc->rx_force_enable) {
  1643. ret = _kserdes_wait_lane_sd(sc->regs, lane);
  1644. if (ret) {
  1645. dev_dbg(sc->dev,
  1646. "init_lane_rx wait sd valid FAILED %d\n", ret);
  1647. return ret;
  1648. }
  1649. dev_dbg(sc->dev, "recover_lane_rx sig detcected\n");
  1650. if ((sc->phy_type == KSERDES_PHY_XGE) ||
  1651. (sc->link_rate >= KSERDES_LINK_RATE_5G)) {
  1652. if (sc->lane[lane].ctrl_rate == KSERDES_FULL_RATE) {
  1653. ret = kserdes_wait_lane_rx_valid(sc, lane);
  1654. if (ret) {
  1655. dev_err(sc->dev,
  1656. "init_lane_rx wait rx valid FAILED %d\n",
  1657. ret);
  1658. return ret;
  1659. }
  1660. dev_dbg(sc->dev, "recover_lane_rx rx valid\n");
  1661. }
  1662. }
  1663. if (sc->phy_type == KSERDES_PHY_XGE) {
  1664. kserdes_rx_att_boost_config_phyb(sc, lane);
  1665. } else if ((sc->link_rate >= KSERDES_LINK_RATE_5G) &&
  1666. (sc->lane[lane].ctrl_rate == KSERDES_FULL_RATE)) {
  1667. kserdes_rx_boost_config_phya(sc, lane);
  1668. }
  1669. }
  1670. if (sc->phy_type == KSERDES_PHY_XGE)
  1671. kserdes_rx_calibration_phyb(sc, lane, lofs, ldlevo);
  1672. return 0;
  1673. }
/*
 * kserdes_sgmii_init() - load and run the GbE (SGMII) serdes init firmware.
 *
 * Return: result of kserdes_load_init_fw().
 */
static int kserdes_sgmii_init(struct kserdes_config *sc)
{
	return kserdes_load_init_fw(sc, ks2_gbe_serdes_firmwares,
				    ARRAY_SIZE(ks2_gbe_serdes_firmwares));
}
/*
 * kserdes_xge_init() - reset the serdes and load the 10GbE init firmware.
 *
 * Unlike the SGMII/PCIE paths, XGE needs a serdes reset before the
 * firmware is loaded.
 *
 * Return: result of kserdes_load_init_fw().
 */
static int kserdes_xge_init(struct kserdes_config *sc)
{
	_kserdes_reset(sc->regs);
	return kserdes_load_init_fw(sc, ks2_xgbe_serdes_firmwares,
				    ARRAY_SIZE(ks2_xgbe_serdes_firmwares));
}
/*
 * kserdes_pcie_init() - load and run the PCIE serdes init firmware.
 *
 * Return: result of kserdes_load_init_fw().
 */
static int kserdes_pcie_init(struct kserdes_config *sc)
{
	return kserdes_load_init_fw(sc, ks2_pcie_serdes_firmwares,
				    ARRAY_SIZE(ks2_pcie_serdes_firmwares));
}
  1690. static int kserdes_of_parse_lane(struct device *dev,
  1691. struct device_node *np,
  1692. struct kserdes_config *sc)
  1693. {
  1694. struct kserdes_lane_config *lc;
  1695. struct kserdes_equalizer *eq;
  1696. struct kserdes_tx_coeff *tc;
  1697. int lane_num, ret;
  1698. ret = of_property_read_u32(np, "reg", &lane_num);
  1699. if (ret) {
  1700. dev_err(dev, "Failed to parse reg\n");
  1701. return -EINVAL;
  1702. }
  1703. if (lane_num >= sc->lanes) {
  1704. dev_err(dev, "Invalid lane number %u\n", lane_num);
  1705. return -EINVAL;
  1706. }
  1707. lc = &sc->lane[lane_num];
  1708. lc->enable = of_device_is_available(np);
  1709. dev_dbg(dev, "lane %d enabled\n", lane_num);
  1710. if (of_property_read_u32(np, "control-rate", &lc->ctrl_rate)) {
  1711. dev_info(dev, "use default lane control-rate: %u\n",
  1712. lc->ctrl_rate);
  1713. }
  1714. dev_dbg(dev, "lane control-rate: %d\n", lc->ctrl_rate);
  1715. if (of_find_property(np, "loopback", NULL))
  1716. lc->loopback = true;
  1717. else
  1718. lc->loopback = false;
  1719. dev_dbg(dev, "lane loopback: %d\n", lc->loopback);
  1720. eq = &lc->rx_start;
  1721. if (of_property_read_u32_array(np, "rx-start", &eq->att, 2)) {
  1722. dev_info(dev, "use default lane rx-start 0 0\n");
  1723. eq->att = 0;
  1724. eq->boost = 0;
  1725. }
  1726. dev_dbg(dev, "lane rx-start: %d %d\n", eq->att, eq->boost);
  1727. eq = &lc->rx_force;
  1728. if (of_property_read_u32_array(np, "rx-force", &eq->att, 2)) {
  1729. dev_info(dev, "use default lane rx-force 0 0\n");
  1730. eq->att = 0;
  1731. eq->boost = 0;
  1732. }
  1733. dev_dbg(dev, "lane rx-force: %d %d\n", eq->att, eq->boost);
  1734. tc = &lc->tx_coeff;
  1735. if (of_property_read_u32_array(np, "tx-coeff", &tc->c1, 5)) {
  1736. dev_info(dev, "use default tx-coeff 0\n");
  1737. tc->c1 = 0;
  1738. }
  1739. dev_dbg(dev, "tx-coeff: %d %d %d %d %d\n",
  1740. tc->c1, tc->c2, tc->cm, tc->att, tc->vreg);
  1741. return lane_num;
  1742. }
  1743. static void kserdes_set_sgmii_defaults(struct kserdes_config *sc)
  1744. {
  1745. int i;
  1746. sc->link_rate = KSERDES_LINK_RATE_1P25G;
  1747. sc->lanes = 4;
  1748. sc->rx_force_enable = false;
  1749. for_each_lane(sc, i) {
  1750. memset(&sc->lane[i], 0, sizeof(sc->lane[i]));
  1751. sc->lane[i].ctrl_rate = KSERDES_QUARTER_RATE;
  1752. }
  1753. }
  1754. static void kserdes_set_xge_defaults(struct kserdes_config *sc)
  1755. {
  1756. int i;
  1757. sc->link_rate = KSERDES_LINK_RATE_10P3125G;
  1758. sc->lanes = 2;
  1759. sc->rx_force_enable = false;
  1760. for_each_lane(sc, i) {
  1761. memset(&sc->lane[i], 0, sizeof(sc->lane[i]));
  1762. sc->lane[i].ctrl_rate = KSERDES_FULL_RATE;
  1763. }
  1764. }
  1765. static void kserdes_set_pcie_defaults(struct kserdes_config *sc)
  1766. {
  1767. int i;
  1768. sc->link_rate = KSERDES_LINK_RATE_5G;
  1769. sc->lanes = 2;
  1770. sc->rx_force_enable = false;
  1771. for_each_lane(sc, i)
  1772. memset(&sc->lane[i], 0, sizeof(sc->lane[i]));
  1773. }
  1774. static void kserdes_set_defaults(struct kserdes_config *sc,
  1775. enum kserdes_phy_type phy_type)
  1776. {
  1777. switch (phy_type) {
  1778. case KSERDES_PHY_SGMII:
  1779. kserdes_set_sgmii_defaults(sc);
  1780. break;
  1781. case KSERDES_PHY_XGE:
  1782. kserdes_set_xge_defaults(sc);
  1783. break;
  1784. case KSERDES_PHY_PCIE:
  1785. kserdes_set_pcie_defaults(sc);
  1786. break;
  1787. default:
  1788. break;
  1789. }
  1790. }
/* Map DT compatibles to their entry in kserdes_devtype[], from which
 * probe derives the PHY type (SGMII / XGE / PCIE). */
static const struct of_device_id kserdes_of_match[] = {
	{ .compatible = "ti,keystone-serdes-gbe",
	  .data = &kserdes_devtype[KSERDES_PHY_SGMII], },
	{ .compatible = "ti,keystone-serdes-xgbe",
	  .data = &kserdes_devtype[KSERDES_PHY_XGE], },
	{ .compatible = "ti,keystone-serdes-pcie",
	  .data = &kserdes_devtype[KSERDES_PHY_PCIE], },
	{ },
};
MODULE_DEVICE_TABLE(of, kserdes_of_match);
  1801. static int kserdes_phy_enable_rx(struct phy *phy)
  1802. {
  1803. struct kserdes_phy *ks_phy = phy_get_drvdata(phy);
  1804. struct kserdes_dev *sd = dev_get_drvdata(phy->dev.parent);
  1805. struct kserdes_config *sc = &sd->sc;
  1806. struct kserdes_ofs *sofs = &sc->sofs;
  1807. struct kserdes_dlev_out dlevo;
  1808. u32 lanes_up_map = 0;
  1809. u32 i = ks_phy->lane;
  1810. int ret;
  1811. ret = kserdes_enable_lane_rx(sc, i, &sofs->lane_ofs[i],
  1812. &dlevo.lane_dlev_out[i]);
  1813. kserdes_clear_wait_after(sc, BIT(i));
  1814. if (sc->phy_type == KSERDES_PHY_XGE) {
  1815. _kserdes_enable_xgmii_port(sc->peripheral_regmap, i);
  1816. kserdes_wait_link_up(sc, BIT(i), &lanes_up_map);
  1817. }
  1818. return 0;
  1819. }
  1820. static int kserdes_phy_reset(struct phy *phy)
  1821. {
  1822. struct kserdes_phy *ks_phy = phy_get_drvdata(phy);
  1823. struct kserdes_dev *sd = dev_get_drvdata(phy->dev.parent);
  1824. struct kserdes_config *sc = &sd->sc;
  1825. struct kserdes_ofs *sofs = &sc->sofs;
  1826. struct kserdes_dlev_out dlevo;
  1827. u32 i = ks_phy->lane;
  1828. u32 lanes_up_map = 0;
  1829. int ret;
  1830. ret = kserdes_recover_lane_rx(sc, i, &sofs->lane_ofs[i],
  1831. &dlevo.lane_dlev_out[i]);
  1832. kserdes_clear_wait_after(sc, BIT(i));
  1833. _kserdes_enable_xgmii_port(sc->peripheral_regmap, i);
  1834. kserdes_wait_link_up(sc, BIT(i), &lanes_up_map);
  1835. dev_dbg(sd->dev, "phy reset: recover lane %u rx\n", i);
  1836. return ret;
  1837. }
  1838. static struct phy_ops kserdes_phy_ops = {
  1839. .init = kserdes_phy_enable_rx,
  1840. .reset = kserdes_phy_reset,
  1841. .owner = THIS_MODULE,
  1842. };
  1843. static int kserdes_of_parse(struct platform_device *pdev,
  1844. struct kserdes_dev *sd,
  1845. struct device_node *np)
  1846. {
  1847. const struct of_device_id *of_id;
  1848. struct kserdes_config *sc = &sd->sc;
  1849. struct device_node *child;
  1850. struct device *dev = sd->dev;
  1851. struct resource res;
  1852. void __iomem *regs;
  1853. int ret, lane = 0;
  1854. ret = of_address_to_resource(np, SERDES_REG_INDEX, &res);
  1855. if (ret) {
  1856. dev_err(dev, "Can't xlate serdes reg addr of node(%s)\n",
  1857. np->name);
  1858. return ret;
  1859. }
  1860. regs = devm_ioremap_resource(dev, &res);
  1861. if (IS_ERR(regs)) {
  1862. dev_err(dev, "Failed to map serdes register base\n");
  1863. return PTR_ERR(regs);
  1864. }
  1865. sc->regs = regs;
  1866. of_id = of_match_device(kserdes_of_match, dev);
  1867. if (!of_id) {
  1868. dev_err(dev, "unknown phy type\n");
  1869. return -EINVAL;
  1870. }
  1871. pdev->id_entry = of_id->data;
  1872. sc->phy_type = pdev->id_entry->driver_data;
  1873. sc->dev = dev;
  1874. kserdes_set_defaults(sc, sc->phy_type);
  1875. if (sc->phy_type == KSERDES_PHY_XGE) {
  1876. sc->peripheral_regmap =
  1877. syscon_regmap_lookup_by_phandle(np,
  1878. "syscon-peripheral");
  1879. if (IS_ERR(sc->peripheral_regmap)) {
  1880. dev_err(sc->dev,
  1881. "peripheral regmap lookup failed: %ld\n",
  1882. PTR_ERR(sc->peripheral_regmap));
  1883. return PTR_ERR(sc->peripheral_regmap);
  1884. }
  1885. sc->pcsr_regmap =
  1886. syscon_regmap_lookup_by_phandle(np, "syscon-link");
  1887. if (IS_ERR(sc->pcsr_regmap)) {
  1888. dev_err(sc->dev, "link regmap lookup failed: %ld\n",
  1889. PTR_ERR(sc->pcsr_regmap));
  1890. return PTR_ERR(sc->pcsr_regmap);
  1891. }
  1892. }
  1893. if (of_property_read_u32(np, "link-rate-kbps", &sc->link_rate)) {
  1894. dev_info(dev, "use default link-rate-kbps: %u\n",
  1895. sc->link_rate);
  1896. }
  1897. if (of_property_read_u32(np, "num-lanes", &sc->lanes))
  1898. dev_info(dev, "use default num-lanes %d\n", sc->lanes);
  1899. if (sc->lanes > KSERDES_MAX_LANES) {
  1900. sc->lanes = KSERDES_MAX_LANES;
  1901. dev_info(dev, "use max allowed lanes %d\n", sc->lanes);
  1902. }
  1903. if (of_property_read_bool(np, "rx-force-enable"))
  1904. sc->rx_force_enable = true;
  1905. else
  1906. sc->rx_force_enable = false;
  1907. sd->nphys = sc->lanes;
  1908. for_each_child_of_node(np, child) {
  1909. struct kserdes_phy *ks_phy;
  1910. struct phy *phy;
  1911. lane = kserdes_of_parse_lane(dev, child, sc);
  1912. if (lane < 0) {
  1913. ret = lane;
  1914. goto err_child;
  1915. }
  1916. if (!sc->lane[lane].enable)
  1917. continue;
  1918. ks_phy = devm_kzalloc(dev, sizeof(*ks_phy), GFP_KERNEL);
  1919. if (!ks_phy) {
  1920. ret = -ENOMEM;
  1921. goto err_child;
  1922. }
  1923. sd->phys[lane] = ks_phy;
  1924. phy = devm_phy_create(dev, child, &kserdes_phy_ops);
  1925. if (IS_ERR(phy)) {
  1926. dev_err(dev, "falied to create PHY\n");
  1927. ret = PTR_ERR(phy);
  1928. goto err_child;
  1929. }
  1930. sd->phys[lane]->phy = phy;
  1931. sd->phys[lane]->lane = lane;
  1932. phy_set_drvdata(phy, sd->phys[lane]);
  1933. }
  1934. return 0;
  1935. err_child:
  1936. of_node_put(child);
  1937. return ret;
  1938. }
  1939. static int kserdes_provider_lanes_enable_common(struct kserdes_config *sc)
  1940. {
  1941. struct kserdes_ofs *sofs = &sc->sofs;
  1942. unsigned long lanes_needed = 0;
  1943. int ret, i;
  1944. if (sc->firmware)
  1945. return 0;
  1946. for_each_enable_lane(sc, i)
  1947. lanes_needed |= BIT(i);
  1948. if (sc->phy_type == KSERDES_PHY_PCIE) {
  1949. kserdes_pcie_lanes_enable(sc);
  1950. return lanes_needed;
  1951. }
  1952. ret = kserdes_lanes_enable_common(sc, sofs);
  1953. if (ret)
  1954. dev_err(sc->dev, "provider lanes enable: FAILED %d\n", ret);
  1955. return ret;
  1956. }
  1957. static int kserdes_provider_init(struct kserdes_dev *sd)
  1958. {
  1959. struct kserdes_config *sc = &sd->sc;
  1960. struct device *dev = sd->dev;
  1961. int ret;
  1962. switch (sc->phy_type) {
  1963. case KSERDES_PHY_SGMII:
  1964. ret = kserdes_sgmii_init(sc);
  1965. break;
  1966. case KSERDES_PHY_XGE:
  1967. ret = kserdes_xge_init(sc);
  1968. break;
  1969. case KSERDES_PHY_PCIE:
  1970. ret = kserdes_pcie_init(sc);
  1971. break;
  1972. default:
  1973. ret = -EINVAL;
  1974. }
  1975. if (ret < 0) {
  1976. dev_err(dev, "serdes procider init failed %d\n", ret);
  1977. return ret;
  1978. }
  1979. return kserdes_provider_lanes_enable_common(sc);
  1980. }
  1981. static struct phy *kserdes_xlate(struct device *dev,
  1982. struct of_phandle_args *args)
  1983. {
  1984. struct kserdes_dev *sd = dev_get_drvdata(dev);
  1985. struct phy *phy = NULL;
  1986. struct device_node *phynode = args->np;
  1987. int i;
  1988. if (args->args_count) {
  1989. dev_err(dev, "invalid #cell in PHY property\n");
  1990. return ERR_PTR(-EINVAL);
  1991. }
  1992. for (i = 0; i < sd->nphys; i++) {
  1993. if (sd->phys[i] &&
  1994. (phynode == sd->phys[i]->phy->dev.of_node)) {
  1995. phy = sd->phys[i]->phy;
  1996. break;
  1997. }
  1998. }
  1999. return phy;
  2000. }
  2001. static int kserdes_probe(struct platform_device *pdev)
  2002. {
  2003. struct phy_provider *phy_provider;
  2004. struct kserdes_dev *sd;
  2005. struct device_node *np = pdev->dev.of_node;
  2006. struct device *dev = &pdev->dev;
  2007. int ret;
  2008. sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL);
  2009. if (!sd)
  2010. return -ENOMEM;
  2011. pm_runtime_enable(&pdev->dev);
  2012. ret = pm_runtime_get_sync(&pdev->dev);
  2013. if (ret < 0) {
  2014. dev_err(dev, "Failed to enable SerDes power-domain\n");
  2015. pm_runtime_set_suspended(dev);
  2016. pm_runtime_put_noidle(dev);
  2017. return ret;
  2018. }
  2019. sd->dev = dev;
  2020. dev_set_drvdata(dev, sd);
  2021. ret = kserdes_of_parse(pdev, sd, np);
  2022. if (ret)
  2023. goto error;
  2024. phy_provider = devm_of_phy_provider_register(sd->dev, kserdes_xlate);
  2025. if (IS_ERR(phy_provider)) {
  2026. ret = PTR_ERR_OR_ZERO(phy_provider);
  2027. goto error;
  2028. }
  2029. kserdes_provider_init(sd);
  2030. dev_vdbg(&pdev->dev, "probed");
  2031. return 0;
  2032. error:
  2033. pm_runtime_put_sync(dev);
  2034. pm_runtime_disable(dev);
  2035. return ret;
  2036. }
/* Platform driver glue; matching is DT-only via kserdes_of_match. */
static struct platform_driver kserdes_driver = {
	.probe = kserdes_probe,
	.driver = {
		.of_match_table = kserdes_of_match,
		.name = "ti,keystone-serdes",
	}
};
module_platform_driver(kserdes_driver);

MODULE_AUTHOR("WingMan Kwok <w-kwok2@ti.com>");
MODULE_DESCRIPTION("TI Keystone SerDes driver");
MODULE_LICENSE("GPL v2");