dp.c

/*
 * Copyright (c) 2011-2013, NVIDIA Corporation.
 * Copyright 2014 Google Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <display.h>
#include <dm.h>
#include <div64.h>
#include <errno.h>
#include <fdtdec.h>
#include <video_bridge.h>
#include <asm/io.h>
#include <asm/arch-tegra/dc.h>
#include "display.h"
#include "edid.h"
#include "sor.h"
#include "displayport.h"

DECLARE_GLOBAL_DATA_PTR;

#define DO_FAST_LINK_TRAINING		1

struct tegra_dp_plat {
	ulong base;
};

/**
 * struct tegra_dp_priv - private displayport driver info
 *
 * @sor: SOR device (video bridge) that drives the DisplayPort output
 * @dc_dev: Display controller device that is sending the video feed
 * @regs: DPAUX controller registers
 * @revision: DPCD revision reported by the sink
 * @enabled: true once the display has been enabled
 */
struct tegra_dp_priv {
	struct udevice *sor;
	struct udevice *dc_dev;
	struct dpaux_ctlr *regs;
	u8 revision;
	int enabled;
};

struct tegra_dp_priv dp_data;

static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
{
	return readl((u32 *)dp->regs + reg);
}

static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
				      u32 val)
{
	writel(val, (u32 *)dp->regs + reg);
}
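
/*
 * Poll a DPAUX register until the masked value matches @exp_val, checking
 * every @poll_interval_us microseconds for up to @timeout_us microseconds.
 *
 * Return: 0 if the expected value was observed, non-zero on timeout
 */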
static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
					       u32 reg, u32 mask, u32 exp_val,
					       u32 poll_interval_us,
					       u32 timeout_us)
{
	u32 reg_val = 0;
	u32 temp = timeout_us;

	do {
		udelay(poll_interval_us);
		reg_val = tegra_dpaux_readl(dp, reg);
		if (timeout_us > poll_interval_us)
			timeout_us -= poll_interval_us;
		else
			break;
	} while ((reg_val & mask) != exp_val);

	if ((reg_val & mask) == exp_val)
		return 0;	/* success */
	debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
	      reg, reg_val, mask, exp_val);

	return temp;
}

static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
{
	/*
	 * According to DP spec, each aux transaction needs to finish
	 * within 40ms.
	 */
	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
		debug("dp: DPAUX transaction timeout\n");
		return -1;
	}

	return 0;
}
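
/*
 * Write one chunk of up to DP_AUX_MAX_BYTES over the AUX channel, retrying
 * on receiver errors and defers until the sink acks or the retry counts are
 * exhausted. On success *size is updated with the number of bytes the sink
 * acknowledged.
 */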
static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
				      u32 addr, u8 *data, u32 *size,
				      u32 *aux_stat)
{
	int i;
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
	u32 temp_data;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only write one chunk of data */

	/* Make sure the command is a write command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CWR:
	case DPAUX_DP_AUXCTL_CMD_MOTWR:
	case DPAUX_DP_AUXCTL_CMD_AUXWR:
		break;
	default:
		debug("dp: aux write cmd 0x%x is invalid\n", cmd);
		return -EINVAL;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
		memcpy(&temp_data, data, 4);
		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
		data += 4;
	}

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(1);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux write transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux write retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux write defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux write defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			return 0;
		} else {
			debug("dp: aux write failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never get here */
	return -EIO;
}
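
/*
 * Read one chunk of up to DP_AUX_MAX_BYTES over the AUX channel. The sink
 * must report hot-plug-detect as plugged; receiver errors and defers are
 * retried as for writes. On success *size is updated with the number of
 * bytes actually returned by the sink.
 */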
static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
				     u32 addr, u8 *data, u32 *size,
				     u32 *aux_stat)
{
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;

	if (*size > DP_AUX_MAX_BYTES) {
		debug("only read one chunk\n");
		return -EIO;	/* only read one chunk */
	}

	/* Check to make sure the command is a read command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CRD:
	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
	case DPAUX_DP_AUXCTL_CMD_MOTRD:
	case DPAUX_DP_AUXCTL_CMD_AUXRD:
		break;
	default:
		debug("dp: aux read cmd 0x%x is invalid\n", cmd);
		return -EIO;
	}

	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: HPD is not detected\n");
		return -EIO;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			debug("dp: aux read transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				debug("dp: aux read retry (0x%x) -- %d\n",
				      *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;	/* retry */
			} else {
				debug("dp: aux read got error (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				debug("dp: aux read defer (0x%x) -- %d\n",
				      *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				debug("dp: aux read defer exceeds max retries (0x%x)\n",
				      *aux_stat);
				return -ETIMEDOUT;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			int i;
			u32 temp_data[4];

			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
				temp_data[i] = tegra_dpaux_readl(dp,
						DPAUX_DP_AUXDATA_READ_W(i));

			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			memcpy(data, temp_data, *size);

			return 0;
		} else {
			debug("dp: aux read failed (0x%x)\n", *aux_stat);
			return -EIO;
		}
	}

	/* Should never get here */
	debug("%s: can't\n", __func__);

	return -EIO;
}
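
/*
 * Read an arbitrary number of bytes from the sink by issuing as many
 * DP_AUX_MAX_BYTES-sized chunk reads as needed. *size is updated with the
 * number of bytes actually transferred.
 */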
static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
			       u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	u32 cur_size;
	int ret = 0;

	do {
		cur_size = *size - finished;
		if (cur_size > DP_AUX_MAX_BYTES)
			cur_size = DP_AUX_MAX_BYTES;

		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
						data, &cur_size, aux_stat);
		if (ret)
			break;

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (*size > finished);

	*size = finished;

	return ret;
}

static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
				 u8 *data_ptr)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
					cmd, data_ptr, &size, &status);
	if (ret) {
		debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}

static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
				  u8 data)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
					 cmd, &data, &size, &status);
	if (ret) {
		debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
		      cmd, status);
	}

	return ret;
}
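
/*
 * Read @size bytes from an I2C device behind the AUX channel (I2C-over-AUX),
 * e.g. the EDID EEPROM at address 0x50. Each iteration writes the register
 * address with MOT set and then reads back one chunk of data.
 *
 * Return: number of bytes read on success, -ve error code on failure
 */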
static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
				 u8 addr, u8 *data, u32 size, u32 *aux_stat)
{
	u32 finished = 0;
	int ret = 0;

	do {
		u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
		u32 len = 1;

		ret = tegra_dc_dpaux_write_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
				&addr, &len, aux_stat);
		if (ret) {
			debug("%s: error sending address to read.\n",
			      __func__);
			return ret;
		}

		ret = tegra_dc_dpaux_read_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
				data, &cur_size, aux_stat);
		if (ret) {
			debug("%s: error reading data.\n", __func__);
			return ret;
		}

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (size > finished);

	return finished;
}
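
/*
 * Bring up the DPAUX pads: clear any pending AUX interrupts (interrupts
 * stay disabled since there is no ISR in U-Boot), set the pad drive
 * strength/level and enable the input receiver, then power up the pads.
 */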
static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
{
	/* clear interrupt */
	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
	/* do not enable interrupts for now; enable them when an ISR is in place */
	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
}

#ifdef DEBUG
static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *link_cfg)
{
	debug("DP config: cfg_name cfg_value\n");
	debug(" Lane Count %d\n",
	      link_cfg->max_lane_count);
	debug(" SupportEnhancedFraming %s\n",
	      link_cfg->support_enhanced_framing ? "Y" : "N");
	debug(" Bandwidth %d\n",
	      link_cfg->max_link_bw);
	debug(" bpp %d\n",
	      link_cfg->bits_per_pixel);
	debug(" EnhancedFraming %s\n",
	      link_cfg->enhanced_framing ? "Y" : "N");
	debug(" Scramble_enabled %s\n",
	      link_cfg->scramble_ena ? "Y" : "N");
	debug(" LinkBW %d\n",
	      link_cfg->link_bw);
	debug(" lane_count %d\n",
	      link_cfg->lane_count);
	debug(" activepolarity %d\n",
	      link_cfg->activepolarity);
	debug(" active_count %d\n",
	      link_cfg->active_count);
	debug(" tu_size %d\n",
	      link_cfg->tu_size);
	debug(" active_frac %d\n",
	      link_cfg->active_frac);
	debug(" watermark %d\n",
	      link_cfg->watermark);
	debug(" hblank_sym %d\n",
	      link_cfg->hblank_sym);
	debug(" vblank_sym %d\n",
	      link_cfg->vblank_sym);
}
#endif
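
/*
 * Step down to the next lower link configuration (reduce the link rate or
 * halve the lane count) when training fails at the current settings.
 *
 * Return: 0 on success, -ENOLINK if no lower configuration is available
 */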
static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				       struct tegra_dp_link_config *cfg)
{
	switch (cfg->link_bw) {
	case SOR_LINK_SPEED_G1_62:
		if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
		cfg->lane_count /= 2;
		break;
	case SOR_LINK_SPEED_G2_7:
		cfg->link_bw = SOR_LINK_SPEED_G1_62;
		break;
	case SOR_LINK_SPEED_G5_4:
		if (cfg->lane_count == 1) {
			cfg->link_bw = SOR_LINK_SPEED_G2_7;
			cfg->lane_count = cfg->max_lane_count;
		} else {
			cfg->lane_count /= 2;
		}
		break;
	default:
		debug("dp: Error link rate %d\n", cfg->link_bw);
		return -ENOLINK;
	}

	return (cfg->lane_count > 0) ? 0 : -ENOLINK;
}

/*
 * Calculate whether the given cfg can meet the mode request.
 * Return 0 if the mode is possible, -1 otherwise.
 */
static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
				   const struct display_timing *timing,
				   struct tegra_dp_link_config *link_cfg)
{
	const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
	const u64 f = 100000;	/* precision factor */
	u32 num_linkclk_line;	/* Number of link clocks per line */
	u64 ratio_f;		/* Ratio of incoming to outgoing data rate */
	u64 frac_f;
	u64 activesym_f;	/* Activesym per TU */
	u64 activecount_f;
	u32 activecount;
	u32 activepolarity;
	u64 approx_value_f;
	u32 activefrac = 0;
	u64 accumulated_error_f = 0;
	u32 lowest_neg_activecount = 0;
	u32 lowest_neg_activepolarity = 0;
	u32 lowest_neg_tusize = 64;
	u32 num_symbols_per_line;
	u64 lowest_neg_activefrac = 0;
	u64 lowest_neg_error_f = 64 * f;
	u64 watermark_f;
	int i;
	int neg;

	if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
	    !link_cfg->bits_per_pixel)
		return -1;

	if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
	    (u64)link_rate * 8 * link_cfg->lane_count)
		return -1;

	num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
				       timing->pixelclock.typ));

	ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
	ratio_f /= 8;
	do_div(ratio_f, link_rate * link_cfg->lane_count);

	for (i = 64; i >= 32; --i) {
		activesym_f = ratio_f * i;
		activecount_f = lldiv(activesym_f, (u32)f) * f;
		frac_f = activesym_f - activecount_f;
		activecount = (u32)(lldiv(activecount_f, (u32)f));

		if (frac_f < (lldiv(f, 2)))	/* fraction < 0.5 */
			activepolarity = 0;
		else {
			activepolarity = 1;
			frac_f = f - frac_f;
		}

		if (frac_f != 0) {
			/* warning: frac_f should be 64-bit */
			frac_f = lldiv(f * f, frac_f);	/* 1 / fraction */
			if (frac_f > (15 * f))
				activefrac = activepolarity ? 1 : 15;
			else
				activefrac = activepolarity ?
					(u32)lldiv(frac_f, (u32)f) + 1 :
					(u32)lldiv(frac_f, (u32)f);
		}

		if (activefrac == 1)
			activepolarity = 0;

		if (activepolarity == 1)
			approx_value_f = activefrac ? lldiv(
				(activecount_f + (activefrac * f - f) * f),
				(activefrac * f)) :
				activecount_f + f;
		else
			approx_value_f = activefrac ?
				activecount_f + lldiv(f, activefrac) :
				activecount_f;

		if (activesym_f < approx_value_f) {
			accumulated_error_f = num_linkclk_line *
				lldiv(approx_value_f - activesym_f, i);
			neg = 1;
		} else {
			accumulated_error_f = num_linkclk_line *
				lldiv(activesym_f - approx_value_f, i);
			neg = 0;
		}

		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
		    (accumulated_error_f == 0)) {
			lowest_neg_error_f = accumulated_error_f;
			lowest_neg_tusize = i;
			lowest_neg_activecount = activecount;
			lowest_neg_activepolarity = activepolarity;
			lowest_neg_activefrac = activefrac;

			if (accumulated_error_f == 0)
				break;
		}
	}

	if (lowest_neg_activefrac == 0) {
		link_cfg->activepolarity = 0;
		link_cfg->active_count = lowest_neg_activepolarity ?
			lowest_neg_activecount : lowest_neg_activecount - 1;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = 1;
	} else {
		link_cfg->activepolarity = lowest_neg_activepolarity;
		link_cfg->active_count = (u32)lowest_neg_activecount;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = (u32)lowest_neg_activefrac;
	}

	watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
	link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
					  f)) + link_cfg->bits_per_pixel / 4 - 1;
	num_symbols_per_line = (timing->hactive.typ *
				link_cfg->bits_per_pixel) /
			       (8 * link_cfg->lane_count);

	if (link_cfg->watermark > 30) {
		debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
		link_cfg->watermark = 30;
		return -1;
	} else if (link_cfg->watermark > num_symbols_per_line) {
		debug("dp: sor setting: force watermark to the number of symbols in the line\n");
		link_cfg->watermark = num_symbols_per_line;
		return -1;
	}

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
	 *                      SetRasterBlankStart.X - 7) * link_clk / pclk)
	 *                    - 3 * enhanced_framing - Y
	 * where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12
	 */
	link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
			timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
			link_rate, timing->pixelclock.typ) -
			3 * link_cfg->enhanced_framing -
			(12 / link_cfg->lane_count);

	if (link_cfg->hblank_sym < 0)
		link_cfg->hblank_sym = 0;

	/*
	 * Refer to dev_disp.ref for more information.
	 * # symbols/vblank = ((SetRasterBlankStart.X -
	 *                      SetRasterBlankEnd.X - 25) * link_clk / pclk)
	 *                    - Y - 1;
	 * where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
	 */
	link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
			* link_rate, timing->pixelclock.typ) - (36 /
			link_cfg->lane_count) - 4;

	if (link_cfg->vblank_sym < 0)
		link_cfg->vblank_sym = 0;

	link_cfg->is_valid = 1;
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, link_cfg);
#endif

	return 0;
}
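
/*
 * Read the sink capabilities (max lane count, TPS3 and enhanced-framing
 * support, downspread, AUX read interval, max link rate, eDP ASSR support)
 * from the DPCD and use them to populate the starting link configuration.
 */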
static int tegra_dc_dp_init_max_link_cfg(
			const struct display_timing *timing,
			struct tegra_dp_priv *dp,
			struct tegra_dp_link_config *link_cfg)
{
	const int drive_current = 0x40404040;
	const int preemphasis = 0x0f0f0f0f;
	const int postcursor = 0;
	u8 dpcd_data;
	int ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
	link_cfg->tps3_supported = (dpcd_data &
			DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
	link_cfg->support_enhanced_framing =
		(dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
		1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
	if (ret)
		return ret;
	link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
				1 : 0;

	ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
				    &link_cfg->aux_rd_interval);
	if (ret)
		return ret;

	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
				    &link_cfg->max_link_bw);
	if (ret)
		return ret;

	/*
	 * Set to a high value for link training and attach.
	 * Will be re-programmed when dp is enabled.
	 */
	link_cfg->drive_current = drive_current;
	link_cfg->preemphasis = preemphasis;
	link_cfg->postcursor = postcursor;

	ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
	if (ret)
		return ret;

	link_cfg->alt_scramber_reset_cap =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
		1 : 0;
	link_cfg->only_enhanced_framing =
		(dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
		1 : 0;

	link_cfg->lane_count = link_cfg->max_lane_count;
	link_cfg->link_bw = link_cfg->max_link_bw;
	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
	link_cfg->frame_in_ms = (1000 / 60) + 1;

	tegra_dc_dp_calc_config(dp, timing, link_cfg);

	return 0;
}

static int tegra_dc_dp_set_assr(struct tegra_dp_priv *priv,
				struct udevice *sor, int ena)
{
	int ret;
	u8 dpcd_data = ena ?
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;

	ret = tegra_dc_dp_dpcd_write(priv, DP_EDP_CONFIGURATION_SET,
				     dpcd_data);
	if (ret)
		return ret;

	/* Also reset the scrambler to 0xfffe */
	tegra_dc_sor_set_internal_panel(sor, ena);

	return 0;
}

static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
				       struct udevice *sor,
				       u8 link_bw)
{
	tegra_dc_sor_set_link_bandwidth(sor, link_bw);

	/* Sink side */
	return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
}

static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
				   const struct tegra_dp_link_config *link_cfg,
				   struct udevice *sor)
{
	u8 dpcd_data;
	int ret;

	/* check if the panel supports enhanced_framing */
	dpcd_data = link_cfg->lane_count;
	if (link_cfg->enhanced_framing)
		dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
	ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
	if (ret)
		return ret;

	tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);

	/* Also power down lanes that will not be used */

	return 0;
}
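
/*
 * Check the per-lane DPCD status registers and report whether every active
 * lane shows clock recovery, channel equalization and symbol lock done.
 *
 * Return: 0 if all lanes are trained, -ve if any lane is not
 */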
static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
				    const struct tegra_dp_link_config *cfg)
{
	u32 lane;
	u8 mask;
	u8 data;
	int ret;

	for (lane = 0; lane < cfg->lane_count; ++lane) {
		ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
				DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
				&data);
		if (ret)
			return ret;
		mask = (lane & 1) ?
			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
			DP_LANE_CR_DONE |
			DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_SYMBOL_LOCKED;
		if ((data & mask) != mask)
			return -1;
	}

	return 0;
}

static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
				      const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data;
	u8 ce_done = 1;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
		if (ret)
			return ret;

		if (n_lanes == 1) {
			ce_done = (data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
				(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
			break;
		} else if (!(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
			   !(data & (0x1 <<
				NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
			return -EIO;
	}

	if (ce_done) {
		ret = tegra_dc_dp_dpcd_read(dp,
					    DP_LANE_ALIGN_STATUS_UPDATED,
					    &data);
		if (ret)
			return ret;
		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
			ce_done = 0;
	}

	return ce_done ? 0 : -EIO;
}

static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
					  const struct tegra_dp_link_config *cfg)
{
	u32 cnt;
	u32 n_lanes = cfg->lane_count;
	u8 data_ptr;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
					    &data_ptr);
		if (ret)
			return ret;

		if (n_lanes == 1)
			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
				1 : 0;
		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
			return 0;
	}

	return 1;
}
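
/*
 * Read the ADJUST_REQUEST registers from the sink and unpack the requested
 * pre-emphasis (pe), voltage swing (vs) and, if TPS3 is supported, the
 * post-cursor2 (pc) level for each lane.
 */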
static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], u8 pc_supported,
			      const struct tegra_dp_link_config *cfg)
{
	size_t cnt;
	u8 data_ptr;
	u32 n_lanes = cfg->lane_count;
	int ret;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
					    &data_ptr);
		if (ret)
			return ret;
		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
		pe[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
		vs[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
	}

	if (pc_supported) {
		ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
					    &data_ptr);
		if (ret)
			return ret;
		for (cnt = 0; cnt < n_lanes; cnt++) {
			pc[cnt] = (data_ptr >>
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
		}
	}

	return 0;
}

static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
				       bool is_clk_recovery,
				       const struct tegra_dp_link_config *cfg)
{
	if (!cfg->aux_rd_interval)
		udelay(is_clk_recovery ? 200 : 500);
	else
		mdelay(cfg->aux_rd_interval * 4);
}

static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
			 const struct tegra_dp_link_config *cfg)
{
	u8 data = (tp == training_pattern_disabled)
		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);

	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
}

static int tegra_dp_link_config(struct tegra_dp_priv *dp,
				const struct tegra_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	u32 retry;
	int ret;

	if (link_cfg->lane_count == 0) {
		debug("dp: error: lane count is 0. Can not set link config.\n");
		return -ENOLINK;
	}

	/* Set power state if it is not in normal level */
	ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
	if (ret)
		return ret;

	if (dpcd_data == DP_SET_POWER_D3) {
		dpcd_data = DP_SET_POWER_D0;

		/* DP spec requires 3 retries */
		for (retry = 3; retry > 0; --retry) {
			ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
						     dpcd_data);
			if (!ret)
				break;
			if (retry == 1) {
				debug("dp: Failed to set DP panel power\n");
				return ret;
			}
		}
	}

	/* Enable ASSR if possible */
	if (link_cfg->alt_scramber_reset_cap) {
		ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
		if (ret)
			return ret;
	}

	ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
	if (ret) {
		debug("dp: Failed to set link bandwidth\n");
		return ret;
	}

	ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
	if (ret) {
		debug("dp: Failed to set lane count\n");
		return ret;
	}

	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
				    link_cfg);

	return 0;
}

static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
				      const struct display_timing *timing,
				      struct tegra_dp_link_config *cfg)
{
	struct tegra_dp_link_config tmp_cfg;
	int ret;

	tmp_cfg = *cfg;
	cfg->is_valid = 0;

	ret = _tegra_dp_lower_link_config(dp, cfg);
	if (!ret)
		ret = tegra_dc_dp_calc_config(dp, timing, cfg);
	if (!ret)
		ret = tegra_dp_link_config(dp, cfg);
	if (ret)
		goto fail;

	return 0;

fail:
	*cfg = tmp_cfg;
	tegra_dp_link_config(dp, &tmp_cfg);

	return ret;
}
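
/*
 * Program one round of drive settings: write the requested pre-emphasis,
 * voltage swing and post-cursor2 values into the SOR lane registers and
 * mirror them into the sink's TRAINING_LANEx_SET registers, flagging levels
 * that have reached their maximum.
 */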
static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			      u32 pc[4], const struct tegra_dp_link_config *cfg)
{
	struct udevice *sor = dp->sor;
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	u32 cnt;
	u32 val;

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 mask = 0;
		u32 pe_reg, vs_reg, pc_reg;
		u32 shift = 0;

		switch (cnt) {
		case 0:
			mask = PR_LANE2_DP_LANE0_MASK;
			shift = PR_LANE2_DP_LANE0_SHIFT;
			break;
		case 1:
			mask = PR_LANE1_DP_LANE1_MASK;
			shift = PR_LANE1_DP_LANE1_SHIFT;
			break;
		case 2:
			mask = PR_LANE0_DP_LANE2_MASK;
			shift = PR_LANE0_DP_LANE2_SHIFT;
			break;
		case 3:
			mask = PR_LANE3_DP_LANE3_MASK;
			shift = PR_LANE3_DP_LANE3_SHIFT;
			break;
		default:
			debug("dp: incorrect lane cnt\n");
			return -EINVAL;
		}

		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];

		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
				      vs_reg << shift, pc_reg << shift,
				      pc_supported);
	}

	tegra_dp_disable_tx_pu(dp->sor);
	udelay(20);

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);

		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
			(max_vs_flag ?
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
			(max_pe_flag ?
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
		tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
	}

	if (pc_supported) {
		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);

			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
				(max_pc_flag0 ?
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
				(pc[cnt + 1] <<
				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
				(max_pc_flag1 ?
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
			tegra_dc_dp_dpcd_write(dp,
					       NV_DPCD_TRAINING_LANE0_1_SET2 +
					       cnt, val);
		}
	}

	return 0;
}

static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
				u32 vs[4], u32 pc[4], u8 pc_supported,
				u32 n_lanes,
				const struct tegra_dp_link_config *cfg)
{
	u32 retry_cnt;

	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
		int ret;

		if (retry_cnt) {
			ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
						 cfg);
			if (ret)
				return ret;
			tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		}

		tegra_dp_wait_aux_training(dp, false, cfg);

		if (!tegra_dp_clock_recovery_status(dp, cfg)) {
			debug("dp: CR failed in channel EQ sequence!\n");
			break;
		}

		if (!tegra_dp_channel_eq_status(dp, cfg))
			return 0;
	}

	return -EIO;
}

static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
			       u32 pc[4],
			       const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int ret;
	u32 tp_src = training_pattern_2;

	if (pc_supported)
		tp_src = training_pattern_3;

	tegra_dp_tpg(dp, tp_src, n_lanes, cfg);

	ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);

	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return ret;
}

static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				  u32 vs[4], u32 pc[4], u8 pc_supported,
				  u32 n_lanes,
				  const struct tegra_dp_link_config *cfg)
{
	u32 vs_temp[4];
	u32 retry_cnt = 0;

	do {
		tegra_dp_lt_config(dp, pe, vs, pc, cfg);
		tegra_dp_wait_aux_training(dp, true, cfg);

		if (tegra_dp_clock_recovery_status(dp, cfg))
			return 0;

		memcpy(vs_temp, vs, sizeof(vs_temp));
		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);

		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
			retry_cnt = 0;
		else
			++retry_cnt;
	} while (retry_cnt < 5);

	return -EIO;
}

static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
				 u32 vs[4], u32 pc[4],
				 const struct tegra_dp_link_config *cfg)
{
	u32 n_lanes = cfg->lane_count;
	u8 pc_supported = cfg->tps3_supported;
	int err;

	tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);

	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
				     cfg);
	if (err < 0)
		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);

	return err;
}
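
/*
 * Full link training: run the clock recovery phase (TPS1) followed by
 * channel equalization (TPS2/TPS3), dropping to a lower link configuration
 * and starting over whenever a phase fails.
 */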
static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
					  const struct display_timing *timing,
					  struct tegra_dp_link_config *cfg)
{
	struct udevice *sor = dp->sor;
	int err;
	u32 pe[4], vs[4], pc[4];

	tegra_sor_precharge_lanes(sor, cfg);

retry_cr:
	memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
	memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
	memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));

	err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;

		debug("dp: clk recovery failed\n");
		goto fail;
	}

	err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
	if (err) {
		if (!tegra_dp_lower_link_config(dp, timing, cfg))
			goto retry_cr;

		debug("dp: channel equalization failed\n");
		goto fail;
	}
#ifdef DEBUG
	tegra_dc_dp_dump_link_cfg(dp, cfg);
#endif

	return 0;

fail:
	return err;
}

/*
 * All link training functions are ported from kernel dc driver.
 * See more details at drivers/video/tegra/dc/dp.c
 */
static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
		const struct tegra_dp_link_config *link_cfg,
		struct udevice *sor)
{
	u8 link_bw;
	u8 lane_count;
	u16 data16;
	u32 data32;
	u32 size;
	u32 status;
	int j;
	u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);

	tegra_dc_sor_set_lane_parm(sor, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
			       DP_SET_ANSI_8B10B);

	/* Send TP1 */
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_1);

	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data16);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
			    DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
	status = mask & 0x1111;
	if ((data16 & status) != status) {
		debug("dp: Link training error for TP1 (%#x, status %#x)\n",
		      data16, status);
		return -EFAULT;
	}

	/* enable ASSR */
	tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);

	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
	udelay(520);

	size = sizeof(data32);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
			    (u8 *)&data32, &size, &status);
	if ((data32 & mask) != (0x7777 & mask)) {
		debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
		return -EFAULT;
	}

	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
				    link_cfg);
	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);

	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
		tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
		debug("Fast link training failed, link bw %d, lane # %d\n",
		      link_bw, lane_count);
		return -EFAULT;
	}

	debug("Fast link training succeeded, link bw %d, lane %d\n",
	      link_cfg->link_bw, link_cfg->lane_count);

	return 0;
}
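
/*
 * Try fast link training first (when DO_FAST_LINK_TRAINING is set) and fall
 * back to the full training sequence if it fails, then verify that the SOR
 * ended up with the requested link bandwidth and lane count.
 */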
static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
		struct tegra_dp_link_config *link_cfg,
		const struct display_timing *timing,
		struct udevice *sor)
{
	u8 link_bw;
	u8 lane_count;
	int ret;

	if (DO_FAST_LINK_TRAINING) {
		ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
		if (ret) {
			debug("dp: fast link training failed\n");
		} else {
			/*
			 * set to a known-good drive setting if fast link
			 * succeeded. Ignore any error.
			 */
			ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
			if (ret)
				debug("Failed to set voltage swing\n");
		}
	} else {
		ret = -ENOSYS;
	}
	if (ret) {
		/* Try full link training then */
		ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
		if (ret) {
			debug("dp: full link training failed\n");
			return ret;
		}
	}

	/* Everything is good; double check the link config */
	tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);

	if ((link_cfg->link_bw == link_bw) &&
	    (link_cfg->lane_count == lane_count))
		return 0;
	else
		return -EFAULT;
}
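
/*
 * Start from the maximum link configuration the sink advertises and try to
 * train the link with it; on success the working configuration is copied
 * back into @link_cfg.
 *
 * Return: 0 if a valid configuration was trained, -ve on error
 */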
static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
			struct tegra_dp_link_config *link_cfg,
			struct udevice *sor,
			const struct display_timing *timing)
{
	struct tegra_dp_link_config temp_cfg;

	if (!timing->pixelclock.typ || !timing->hactive.typ ||
	    !timing->vactive.typ) {
		debug("dp: error mode configuration\n");
		return -EINVAL;
	}
	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
		debug("dp: error link configuration\n");
		return -EINVAL;
	}

	link_cfg->is_valid = 0;

	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));

	temp_cfg.link_bw = temp_cfg.max_link_bw;
	temp_cfg.lane_count = temp_cfg.max_lane_count;

	/*
	 * set to max link config
	 */
	if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
	    (!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
		/* the max link cfg is doable */
		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));

	return link_cfg->is_valid ? 0 : -EFAULT;
}
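
/*
 * Wait up to 200ms for the hot-plug-detect signal to report that a sink is
 * plugged in.
 *
 * Return: 0 if HPD went high, -EIO on timeout
 */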
static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
{
	const int vdd_to_hpd_delay_ms = 200;
	u32 val;
	ulong start;

	start = get_timer(0);
	do {
		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
			return 0;
		udelay(100);
	} while (get_timer(start) < vdd_to_hpd_delay_ms);

	return -EIO;
}

static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
{
	u8 dpcd_data;
	int out_of_sync;
	int ret;

	debug("%s: delay=%d\n", __func__, delay_ms);
	mdelay(delay_ms);
	ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
	if (ret)
		return ret;

	out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
	if (out_of_sync)
		debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
	else
		debug("SINK is in synchronization\n");

	return out_of_sync;
}
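
/*
 * After the display has been attached, poll the sink status and, if the
 * sink reports it is out of sync, detach, retrain the link and attach
 * again, giving the sink progressively more frames to settle each time.
 */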
static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
				  struct tegra_dp_link_config *link_cfg,
				  const struct display_timing *timing)
{
	const int max_retry = 5;
	int delay_frame;
	int retries;

	/*
	 * DP TCON may skip some main stream frames, thus we need to wait
	 * some delay before reading the DPCD SINK STATUS register, starting
	 * from 5
	 */
	delay_frame = 5;

	retries = max_retry;
	do {
		int ret;

		if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
						  delay_frame))
			return 0;

		debug("%s: retries left %d\n", __func__, retries);
		if (!retries--) {
			printf("DP: Out of sync after %d retries\n", max_retry);
			return -EIO;
		}

		ret = tegra_dc_sor_detach(dp->dc_dev, dp->sor);
		if (ret)
			return ret;
		if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
						 timing)) {
			debug("dp: %s: failed to configure link\n", __func__);
			continue;
		}

		tegra_dc_sor_set_power_state(dp->sor, 1);
		tegra_dc_sor_attach(dp->dc_dev, dp->sor, link_cfg, timing);

		/*
		 * Increase delay_frame for the next try in case the sink is
		 * skipping more frames
		 */
		delay_frame += 10;
	} while (1);
}
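
/*
 * Enable the DisplayPort output: power up the AUX pads, wait for HPD, read
 * the sink capabilities, bring up the SOR, power the panel and sink, train
 * the link, then attach the display controller and turn on the backlight.
 */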
int tegra_dp_enable(struct udevice *dev, int panel_bpp,
		    const struct display_timing *timing)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
	struct udevice *sor;
	int data;
	int retry;
	int ret;

	memset(link_cfg, '\0', sizeof(*link_cfg));
	link_cfg->is_valid = 0;
	link_cfg->scramble_ena = 1;

	tegra_dc_dpaux_enable(priv);

	if (tegra_dp_hpd_plug(priv) < 0) {
		debug("dp: hpd plug failed\n");
		return -EIO;
	}

	link_cfg->bits_per_pixel = panel_bpp;
	if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
		debug("dp: failed to init link configuration\n");
		return -ENOLINK;
	}

	ret = uclass_first_device(UCLASS_VIDEO_BRIDGE, &sor);
	if (ret || !sor) {
		debug("dp: failed to find SOR device: ret=%d\n", ret);
		return ret;
	}
	priv->sor = sor;
	ret = tegra_dc_sor_enable_dp(sor, link_cfg);
	if (ret)
		return ret;

	tegra_dc_sor_set_panel_power(sor, 1);

	/* Write power on to DPCD */
	data = DP_SET_POWER_D0;
	retry = 0;
	do {
		ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);

	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
		debug("dp: failed to power on panel (0x%x)\n", ret);
		return -ENETUNREACH;
		goto error_enable;
	}

	/* Confirm DP plugging status */
	if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
			DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		debug("dp: could not detect HPD\n");
		return -ENXIO;
	}

	/* Check DP version */
	if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
		debug("dp: failed to read the revision number from sink\n");
		return -EIO;
	}

	if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
		debug("dp: error configuring link\n");
		return -ENOMEDIUM;
	}

	tegra_dc_sor_set_power_state(sor, 1);
	ret = tegra_dc_sor_attach(priv->dc_dev, sor, link_cfg, timing);
	if (ret && ret != -EEXIST)
		return ret;

	/*
	 * This takes a long time, but can apparently resolve a failure to
	 * bring up the display correctly.
	 */
	if (0) {
		ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
		if (ret)
			return ret;
	}

	/* Power down the unused lanes to save power - a few hundred mW */
	tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);

	ret = video_bridge_set_backlight(sor, 80);
	if (ret) {
		debug("dp: failed to set backlight\n");
		return ret;
	}

	priv->enabled = true;
error_enable:
	return 0;
}

static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);

	plat->base = dev_get_addr(dev);

	return 0;
}

static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
{
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	const int tegra_edid_i2c_address = 0x50;
	u32 aux_stat = 0;

	tegra_dc_dpaux_enable(priv);

	return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
				     buf_size, &aux_stat);
}

static const struct dm_display_ops dp_tegra_ops = {
	.read_edid = tegra_dp_read_edid,
	.enable = tegra_dp_enable,
};

static int dp_tegra_probe(struct udevice *dev)
{
	struct tegra_dp_plat *plat = dev_get_platdata(dev);
	struct tegra_dp_priv *priv = dev_get_priv(dev);
	struct display_plat *disp_uc_plat = dev_get_uclass_platdata(dev);

	priv->regs = (struct dpaux_ctlr *)plat->base;
	priv->enabled = false;

	/* Remember the display controller that is sending us video */
	priv->dc_dev = disp_uc_plat->src_dev;

	return 0;
}

static const struct udevice_id tegra_dp_ids[] = {
	{ .compatible = "nvidia,tegra124-dpaux" },
	{ }
};

U_BOOT_DRIVER(dp_tegra) = {
	.name	= "dpaux_tegra",
	.id	= UCLASS_DISPLAY,
	.of_match = tegra_dp_ids,
	.ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
	.probe	= dp_tegra_probe,
	.ops	= &dp_tegra_ops,
	.priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
	.platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
};