/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/types.h>

#include "pcie-designware.h"
  33. /* PCIe controller wrapper DRA7XX configuration registers */
  34. #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024
  35. #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
  36. #define ERR_SYS BIT(0)
  37. #define ERR_FATAL BIT(1)
  38. #define ERR_NONFATAL BIT(2)
  39. #define ERR_COR BIT(3)
  40. #define ERR_AXI BIT(4)
  41. #define ERR_ECRC BIT(5)
  42. #define PME_TURN_OFF BIT(8)
  43. #define PME_TO_ACK BIT(9)
  44. #define PM_PME BIT(10)
  45. #define LINK_REQ_RST BIT(11)
  46. #define LINK_UP_EVT BIT(12)
  47. #define CFG_BME_EVT BIT(13)
  48. #define CFG_MSE_EVT BIT(14)
  49. #define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
  50. ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
  51. LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
  52. #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034
  53. #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038
  54. #define INTA BIT(0)
  55. #define INTB BIT(1)
  56. #define INTC BIT(2)
  57. #define INTD BIT(3)
  58. #define MSI BIT(4)
  59. #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
  60. #define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100
  61. #define DEVICE_TYPE_EP 0x0
  62. #define DEVICE_TYPE_LEG_EP 0x1
  63. #define DEVICE_TYPE_RC 0x4
  64. #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
  65. #define LTSSM_EN 0x1
  66. #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
  67. #define LINK_UP BIT(16)
  68. #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
  69. #define EXP_CAP_ID_OFFSET 0x70
  70. #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
  71. #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
  72. #define PCIECTRL_TI_CONF_MSI_XMT 0x012c
  73. #define MSI_REQ_GRANT BIT(0)
  74. #define MSI_VECTOR_SHIFT 7
  75. #define PCIE_1LANE_2LANE_SELECTION BIT(13)
  76. #define PCIE_B1C0_MODE_SEL BIT(2)
  77. struct dra7xx_pcie {
  78. struct dw_pcie *pci;
  79. void __iomem *base; /* DT ti_conf */
  80. int phy_count; /* DT phy-names count */
  81. struct phy **phy;
  82. int link_gen;
  83. struct irq_domain *irq_domain;
  84. enum dw_pcie_device_mode mode;
  85. };
  86. struct dra7xx_pcie_of_data {
  87. enum dw_pcie_device_mode mode;
  88. u32 b1co_mode_sel_mask;
  89. };
  90. struct dra7xx_pcie_data {
  91. };
  92. #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
  93. static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
  94. {
  95. return readl(pcie->base + offset);
  96. }
  97. static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
  98. u32 value)
  99. {
  100. writel(value, pcie->base + offset);
  101. }
  102. static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
  103. {
  104. return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
  105. }
  106. static int dra7xx_pcie_link_up(struct dw_pcie *pci)
  107. {
  108. struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
  109. u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
  110. return !!(reg & LINK_UP);
  111. }
  112. static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
  113. {
  114. struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
  115. u32 reg;
  116. reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
  117. reg &= ~LTSSM_EN;
  118. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
  119. }
  120. static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
  121. {
  122. struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
  123. struct device *dev = pci->dev;
  124. u32 reg;
  125. u32 exp_cap_off = EXP_CAP_ID_OFFSET;
  126. if (dw_pcie_link_up(pci)) {
  127. dev_err(dev, "link is already up\n");
  128. return 0;
  129. }
  130. if (dra7xx->link_gen == 1) {
  131. dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
  132. 4, &reg);
  133. if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
  134. reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
  135. reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
  136. dw_pcie_write(pci->dbi_base + exp_cap_off +
  137. PCI_EXP_LNKCAP, 4, reg);
  138. }
  139. dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
  140. 2, &reg);
  141. if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
  142. reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
  143. reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
  144. dw_pcie_write(pci->dbi_base + exp_cap_off +
  145. PCI_EXP_LNKCTL2, 2, reg);
  146. }
  147. }
  148. reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
  149. reg |= LTSSM_EN;
  150. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
  151. return 0;
  152. }
  153. static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
  154. {
  155. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
  156. ~LEG_EP_INTERRUPTS & ~MSI);
  157. dra7xx_pcie_writel(dra7xx,
  158. PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
  159. MSI | LEG_EP_INTERRUPTS);
  160. }
  161. static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
  162. {
  163. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
  164. ~INTERRUPTS);
  165. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
  166. INTERRUPTS);
  167. }
  168. static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
  169. {
  170. dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
  171. dra7xx_pcie_enable_msi_interrupts(dra7xx);
  172. }
  173. static void dra7xx_pcie_host_init(struct pcie_port *pp)
  174. {
  175. struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
  176. struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
  177. dw_pcie_setup_rc(pp);
  178. dra7xx_pcie_establish_link(pci);
  179. dw_pcie_wait_for_link(pci);
  180. dw_pcie_msi_init(pp);
  181. dra7xx_pcie_enable_interrupts(dra7xx);
  182. }
  183. static struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
  184. .host_init = dra7xx_pcie_host_init,
  185. };
  186. static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
  187. irq_hw_number_t hwirq)
  188. {
  189. irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
  190. irq_set_chip_data(irq, domain->host_data);
  191. return 0;
  192. }
  193. static const struct irq_domain_ops intx_domain_ops = {
  194. .map = dra7xx_pcie_intx_map,
  195. };
  196. static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
  197. {
  198. struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
  199. struct device *dev = pci->dev;
  200. struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
  201. struct device_node *node = dev->of_node;
  202. struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
  203. if (!pcie_intc_node) {
  204. dev_err(dev, "No PCIe Intc node found\n");
  205. return -ENODEV;
  206. }
  207. dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
  208. &intx_domain_ops, pp);
  209. if (!dra7xx->irq_domain) {
  210. dev_err(dev, "Failed to get a INTx IRQ domain\n");
  211. return -ENODEV;
  212. }
  213. return 0;
  214. }
  215. static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
  216. {
  217. struct dra7xx_pcie *dra7xx = arg;
  218. struct dw_pcie *pci = dra7xx->pci;
  219. struct pcie_port *pp = &pci->pp;
  220. u32 reg;
  221. reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
  222. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
  223. switch (reg) {
  224. case MSI:
  225. dw_handle_msi_irq(pp);
  226. break;
  227. case INTA:
  228. case INTB:
  229. case INTC:
  230. case INTD:
  231. generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
  232. ffs(reg)));
  233. break;
  234. }
  235. return IRQ_HANDLED;
  236. }
  237. static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
  238. {
  239. struct dra7xx_pcie *dra7xx = arg;
  240. struct dw_pcie *pci = dra7xx->pci;
  241. struct device *dev = pci->dev;
  242. struct dw_pcie_ep *ep = &pci->ep;
  243. u32 reg;
  244. reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
  245. if (reg & ERR_SYS)
  246. dev_dbg(dev, "System Error\n");
  247. if (reg & ERR_FATAL)
  248. dev_dbg(dev, "Fatal Error\n");
  249. if (reg & ERR_NONFATAL)
  250. dev_dbg(dev, "Non Fatal Error\n");
  251. if (reg & ERR_COR)
  252. dev_dbg(dev, "Correctable Error\n");
  253. if (reg & ERR_AXI)
  254. dev_dbg(dev, "AXI tag lookup fatal Error\n");
  255. if (reg & ERR_ECRC)
  256. dev_dbg(dev, "ECRC Error\n");
  257. if (reg & PME_TURN_OFF)
  258. dev_dbg(dev,
  259. "Power Management Event Turn-Off message received\n");
  260. if (reg & PME_TO_ACK)
  261. dev_dbg(dev,
  262. "Power Management Turn-Off Ack message received\n");
  263. if (reg & PM_PME)
  264. dev_dbg(dev, "PM Power Management Event message received\n");
  265. if (reg & LINK_REQ_RST)
  266. dev_dbg(dev, "Link Request Reset\n");
  267. if (reg & LINK_UP_EVT) {
  268. if (dra7xx->mode == DW_PCIE_EP_TYPE)
  269. dw_pcie_ep_linkup(ep);
  270. dev_dbg(dev, "Link-up state change\n");
  271. }
  272. if (reg & CFG_BME_EVT)
  273. dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
  274. if (reg & CFG_MSE_EVT)
  275. dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
  276. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
  277. return IRQ_HANDLED;
  278. }
  279. static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
  280. {
  281. u32 reg;
  282. reg = PCI_BASE_ADDRESS_0 + (4 * bar);
  283. dw_pcie_writel_dbi2(pci, reg, 0x0);
  284. dw_pcie_writel_dbi(pci, reg, 0x0);
  285. }
  286. static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
  287. {
  288. struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
  289. struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
  290. enum pci_barno bar;
  291. for (bar = BAR_0; bar <= BAR_5; bar++)
  292. dw_pcie_ep_reset_bar(pci, bar);
  293. dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
  294. }
  295. static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
  296. {
  297. dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
  298. mdelay(1);
  299. dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
  300. }
  301. static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
  302. u8 interrupt_num)
  303. {
  304. u32 reg;
  305. reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
  306. reg |= MSI_REQ_GRANT;
  307. dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
  308. }
  309. static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
  310. enum pci_epc_irq_type type, u8 interrupt_num)
  311. {
  312. struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
  313. struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
  314. switch (type) {
  315. case PCI_EPC_IRQ_LEGACY:
  316. dra7xx_pcie_raise_legacy_irq(dra7xx);
  317. break;
  318. case PCI_EPC_IRQ_MSI:
  319. dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
  320. break;
  321. default:
  322. dev_err(pci->dev, "UNKNOWN IRQ type\n");
  323. }
  324. return 0;
  325. }
  326. static struct dw_pcie_ep_ops pcie_ep_ops = {
  327. .ep_init = dra7xx_pcie_ep_init,
  328. .raise_irq = dra7xx_pcie_raise_irq,
  329. };
  330. static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
  331. struct platform_device *pdev)
  332. {
  333. int ret;
  334. struct dw_pcie_ep *ep;
  335. struct resource *res;
  336. struct device *dev = &pdev->dev;
  337. struct dw_pcie *pci = dra7xx->pci;
  338. ep = &pci->ep;
  339. ep->ops = &pcie_ep_ops;
  340. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
  341. pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
  342. if (!pci->dbi_base)
  343. return -ENOMEM;
  344. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
  345. pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
  346. if (!pci->dbi_base2)
  347. return -ENOMEM;
  348. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
  349. if (!res)
  350. return -EINVAL;
  351. ep->phys_base = res->start;
  352. ep->addr_size = resource_size(res);
  353. ret = dw_pcie_ep_init(ep);
  354. if (ret) {
  355. dev_err(dev, "failed to initialize endpoint\n");
  356. return ret;
  357. }
  358. return 0;
  359. }
  360. static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
  361. struct platform_device *pdev)
  362. {
  363. int ret;
  364. struct pcie_port *pp;
  365. struct resource *res;
  366. struct device *dev = &pdev->dev;
  367. struct dw_pcie *pci = dra7xx->pci;
  368. pp = &pci->pp;
  369. pp->ops = &dra7xx_pcie_host_ops;
  370. pp->irq = platform_get_irq(pdev, 1);
  371. if (pp->irq < 0) {
  372. dev_err(dev, "missing IRQ resource\n");
  373. return -EINVAL;
  374. }
  375. ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
  376. IRQF_SHARED | IRQF_NO_THREAD,
  377. "dra7-pcie-msi", dra7xx);
  378. if (ret) {
  379. dev_err(dev, "failed to request irq\n");
  380. return ret;
  381. }
  382. ret = dra7xx_pcie_init_irq_domain(pp);
  383. if (ret < 0)
  384. return ret;
  385. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
  386. pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
  387. if (!pci->dbi_base)
  388. return -ENOMEM;
  389. ret = dw_pcie_host_init(pp);
  390. if (ret) {
  391. dev_err(dev, "failed to initialize host\n");
  392. return ret;
  393. }
  394. return 0;
  395. }
  396. static const struct dw_pcie_ops dw_pcie_ops = {
  397. .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
  398. .start_link = dra7xx_pcie_establish_link,
  399. .stop_link = dra7xx_pcie_stop_link,
  400. .link_up = dra7xx_pcie_link_up,
  401. };
  402. static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
  403. {
  404. int phy_count = dra7xx->phy_count;
  405. while (phy_count--) {
  406. phy_power_off(dra7xx->phy[phy_count]);
  407. phy_exit(dra7xx->phy[phy_count]);
  408. }
  409. }
  410. static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
  411. {
  412. int phy_count = dra7xx->phy_count;
  413. int ret;
  414. int i;
  415. for (i = 0; i < phy_count; i++) {
  416. ret = phy_init(dra7xx->phy[i]);
  417. if (ret < 0)
  418. goto err_phy;
  419. ret = phy_power_on(dra7xx->phy[i]);
  420. if (ret < 0) {
  421. phy_exit(dra7xx->phy[i]);
  422. goto err_phy;
  423. }
  424. }
  425. return 0;
  426. err_phy:
  427. while (--i >= 0) {
  428. phy_power_off(dra7xx->phy[i]);
  429. phy_exit(dra7xx->phy[i]);
  430. }
  431. return ret;
  432. }
  433. static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
  434. .mode = DW_PCIE_RC_TYPE,
  435. };
  436. static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
  437. .mode = DW_PCIE_EP_TYPE,
  438. };
  439. static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
  440. .b1co_mode_sel_mask = BIT(2),
  441. .mode = DW_PCIE_RC_TYPE,
  442. };
  443. static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
  444. .b1co_mode_sel_mask = BIT(2),
  445. .mode = DW_PCIE_EP_TYPE,
  446. };
  447. static const struct of_device_id of_dra7xx_pcie_match[] = {
  448. {
  449. .compatible = "ti,dra7-pcie",
  450. .data = &dra7xx_pcie_rc_of_data,
  451. },
  452. {
  453. .compatible = "ti,dra7-pcie-ep",
  454. .data = &dra7xx_pcie_ep_of_data,
  455. },
  456. {
  457. .compatible = "ti,dra746-pcie-rc",
  458. .data = &dra746_pcie_rc_of_data,
  459. },
  460. {
  461. .compatible = "ti,dra746-pcie-ep",
  462. .data = &dra746_pcie_ep_of_data,
  463. },
  464. {
  465. .compatible = "ti,dra726-pcie-rc",
  466. .data = &dra7xx_pcie_rc_of_data,
  467. },
  468. {
  469. .compatible = "ti,dra726-pcie-ep",
  470. .data = &dra7xx_pcie_ep_of_data,
  471. },
  472. {},
  473. };
  474. /*
  475. * dra7xx_pcie_ep_legacy_mode: workaround for AM572x/AM571x Errata i870
  476. * @dra7xx: the dra7xx device where the workaround should be applied
  477. *
  478. * Access to the PCIe slave port that are not 32-bit aligned will result
  479. * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
  480. * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
  481. * 0x3.
  482. *
  483. * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
  484. */
  485. static int dra7xx_pcie_ep_legacy_mode(struct device *dev)
  486. {
  487. int ret;
  488. struct device_node *np = dev->of_node;
  489. struct regmap *regmap;
  490. unsigned int reg;
  491. unsigned int field;
  492. regmap = syscon_regmap_lookup_by_phandle(np, "syscon-legacy-mode");
  493. if (IS_ERR(regmap)) {
  494. dev_dbg(dev, "can't get syscon-legacy-mode\n");
  495. return -EINVAL;
  496. }
  497. if (of_property_read_u32_index(np, "syscon-legacy-mode", 1, &reg)) {
  498. dev_err(dev, "couldn't get legacy mode register offset\n");
  499. return -EINVAL;
  500. }
  501. if (of_property_read_u32_index(np, "syscon-legacy-mode", 2, &field)) {
  502. dev_err(dev, "can't get bit field for setting legacy mode\n");
  503. return -EINVAL;
  504. }
  505. ret = regmap_update_bits(regmap, reg, field, field);
  506. if (ret)
  507. dev_err(dev, "failed to set legacy mode\n");
  508. return ret;
  509. }
  510. static int dra7xx_pcie_configure_two_lane(struct device *dev,
  511. u32 b1co_mode_sel_mask)
  512. {
  513. struct device_node *np = dev->of_node;
  514. struct regmap *pcie_syscon;
  515. unsigned int pcie_reg;
  516. pcie_syscon = syscon_regmap_lookup_by_phandle(np, "syscon-lane-conf");
  517. if (IS_ERR(pcie_syscon)) {
  518. dev_err(dev, "unable to get syscon-lane-conf\n");
  519. return -EINVAL;
  520. }
  521. if (of_property_read_u32_index(np, "syscon-lane-conf", 1, &pcie_reg)) {
  522. dev_err(dev, "couldn't get lane configuration reg offset\n");
  523. return -EINVAL;
  524. }
  525. regmap_update_bits(pcie_syscon, pcie_reg, PCIE_1LANE_2LANE_SELECTION,
  526. PCIE_1LANE_2LANE_SELECTION);
  527. pcie_syscon = syscon_regmap_lookup_by_phandle(np, "syscon-lane-sel");
  528. if (IS_ERR(pcie_syscon)) {
  529. dev_err(dev, "unable to get syscon-lane-sel\n");
  530. return -EINVAL;
  531. }
  532. if (of_property_read_u32_index(np, "syscon-lane-sel", 1, &pcie_reg)) {
  533. dev_err(dev, "couldn't get lane selection reg offset\n");
  534. return -EINVAL;
  535. }
  536. regmap_update_bits(pcie_syscon, pcie_reg, b1co_mode_sel_mask,
  537. PCIE_B1C0_MODE_SEL);
  538. return 0;
  539. }
  540. static int __init dra7xx_pcie_probe(struct platform_device *pdev)
  541. {
  542. u32 reg;
  543. int ret;
  544. int irq;
  545. int i;
  546. int phy_count;
  547. struct phy **phy;
  548. struct device_link **link;
  549. void __iomem *base;
  550. struct resource *res;
  551. struct dw_pcie *pci;
  552. struct dra7xx_pcie *dra7xx;
  553. struct device *dev = &pdev->dev;
  554. struct device_node *np = dev->of_node;
  555. char name[10];
  556. struct gpio_desc *reset;
  557. const struct of_device_id *match;
  558. const struct dra7xx_pcie_of_data *data;
  559. enum dw_pcie_device_mode mode;
  560. u32 b1co_mode_sel_mask;
  561. match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
  562. if (!match)
  563. return -EINVAL;
  564. data = (struct dra7xx_pcie_of_data *)match->data;
  565. mode = (enum dw_pcie_device_mode)data->mode;
  566. b1co_mode_sel_mask = data->b1co_mode_sel_mask;
  567. dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
  568. if (!dra7xx)
  569. return -ENOMEM;
  570. pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
  571. if (!pci)
  572. return -ENOMEM;
  573. pci->dev = dev;
  574. pci->ops = &dw_pcie_ops;
  575. irq = platform_get_irq(pdev, 0);
  576. if (irq < 0) {
  577. dev_err(dev, "missing IRQ resource\n");
  578. return -EINVAL;
  579. }
  580. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
  581. base = devm_ioremap_nocache(dev, res->start, resource_size(res));
  582. if (!base)
  583. return -ENOMEM;
  584. phy_count = of_property_count_strings(np, "phy-names");
  585. if (phy_count < 0) {
  586. dev_err(dev, "unable to find the strings\n");
  587. return phy_count;
  588. }
  589. phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
  590. if (!phy)
  591. return -ENOMEM;
  592. link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
  593. if (!link)
  594. return -ENOMEM;
  595. for (i = 0; i < phy_count; i++) {
  596. snprintf(name, sizeof(name), "pcie-phy%d", i);
  597. phy[i] = devm_phy_get(dev, name);
  598. if (IS_ERR(phy[i]))
  599. return PTR_ERR(phy[i]);
  600. link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
  601. if (!link[i]) {
  602. ret = -EINVAL;
  603. goto err_link;
  604. }
  605. }
  606. dra7xx->base = base;
  607. dra7xx->phy = phy;
  608. dra7xx->pci = pci;
  609. dra7xx->phy_count = phy_count;
  610. if (phy_count == 2) {
  611. ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
  612. if (ret < 0)
  613. goto err_link;
  614. }
  615. ret = dra7xx_pcie_enable_phy(dra7xx);
  616. if (ret) {
  617. dev_err(dev, "failed to enable phy\n");
  618. return ret;
  619. }
  620. platform_set_drvdata(pdev, dra7xx);
  621. pm_runtime_enable(dev);
  622. ret = pm_runtime_get_sync(dev);
  623. if (ret < 0) {
  624. dev_err(dev, "pm_runtime_get_sync failed\n");
  625. goto err_get_sync;
  626. }
  627. reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
  628. if (IS_ERR(reset)) {
  629. ret = PTR_ERR(reset);
  630. dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
  631. goto err_gpio;
  632. }
  633. reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
  634. reg &= ~LTSSM_EN;
  635. dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
  636. dra7xx->link_gen = of_pci_get_max_link_speed(np);
  637. if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
  638. dra7xx->link_gen = 2;
  639. switch (mode) {
  640. case DW_PCIE_RC_TYPE:
  641. dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
  642. DEVICE_TYPE_RC);
  643. ret = dra7xx_add_pcie_port(dra7xx, pdev);
  644. if (ret < 0)
  645. goto err_gpio;
  646. break;
  647. case DW_PCIE_EP_TYPE:
  648. dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
  649. DEVICE_TYPE_EP);
  650. ret = dra7xx_pcie_ep_legacy_mode(dev);
  651. if (ret)
  652. goto err_gpio;
  653. ret = dra7xx_add_pcie_ep(dra7xx, pdev);
  654. if (ret < 0)
  655. goto err_gpio;
  656. break;
  657. default:
  658. dev_err(dev, "INVALID device type %d\n", mode);
  659. }
  660. dra7xx->mode = mode;
  661. ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
  662. IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
  663. if (ret) {
  664. dev_err(dev, "failed to request irq\n");
  665. goto err_gpio;
  666. }
  667. return 0;
  668. err_gpio:
  669. pm_runtime_put(dev);
  670. err_get_sync:
  671. pm_runtime_disable(dev);
  672. dra7xx_pcie_disable_phy(dra7xx);
  673. err_link:
  674. while (--i >= 0)
  675. device_link_del(link[i]);
  676. return ret;
  677. }
  678. #ifdef CONFIG_PM_SLEEP
  679. static int dra7xx_pcie_suspend(struct device *dev)
  680. {
  681. struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
  682. struct dw_pcie *pci = dra7xx->pci;
  683. u32 val;
  684. if (dra7xx->mode != DW_PCIE_RC_TYPE)
  685. return 0;
  686. /* clear MSE */
  687. val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
  688. val &= ~PCI_COMMAND_MEMORY;
  689. dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
  690. return 0;
  691. }
  692. static int dra7xx_pcie_resume(struct device *dev)
  693. {
  694. struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
  695. struct dw_pcie *pci = dra7xx->pci;
  696. u32 val;
  697. if (dra7xx->mode != DW_PCIE_RC_TYPE)
  698. return 0;
  699. /* set MSE */
  700. val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
  701. val |= PCI_COMMAND_MEMORY;
  702. dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
  703. return 0;
  704. }
  705. static int dra7xx_pcie_suspend_noirq(struct device *dev)
  706. {
  707. struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
  708. dra7xx_pcie_disable_phy(dra7xx);
  709. return 0;
  710. }
  711. static int dra7xx_pcie_resume_noirq(struct device *dev)
  712. {
  713. struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
  714. int ret;
  715. ret = dra7xx_pcie_enable_phy(dra7xx);
  716. if (ret) {
  717. dev_err(dev, "failed to enable phy\n");
  718. return ret;
  719. }
  720. return 0;
  721. }
  722. #endif
  723. static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
  724. SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
  725. SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
  726. dra7xx_pcie_resume_noirq)
  727. };
  728. static struct platform_driver dra7xx_pcie_driver = {
  729. .driver = {
  730. .name = "dra7-pcie",
  731. .of_match_table = of_dra7xx_pcie_match,
  732. .suppress_bind_attrs = true,
  733. .pm = &dra7xx_pcie_pm_ops,
  734. },
  735. };
  736. builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);