/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

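/*
 * "Own" config accesses target the host bridge's own config space, which is
 * reached through the DBI (DataBus Interface) register window.  A platform
 * driver may override either accessor through its host ops.
 */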
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
                               u32 *val)
{
        struct dw_pcie *pci;

        if (pp->ops->rd_own_conf)
                return pp->ops->rd_own_conf(pp, where, size, val);

        pci = to_dw_pcie_from_pp(pp);
        return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
                               u32 val)
{
        struct dw_pcie *pci;

        if (pp->ops->wr_own_conf)
                return pp->ops->wr_own_conf(pp, where, size, val);

        pci = to_dw_pcie_from_pp(pp);
        return dw_pcie_write(pci->dbi_base + where, size, val);
}

static struct irq_chip dw_msi_irq_chip = {
        .name = "PCI-MSI",
        .irq_enable = pci_msi_unmask_irq,
        .irq_disable = pci_msi_mask_irq,
        .irq_mask = pci_msi_mask_irq,
        .irq_unmask = pci_msi_unmask_irq,
};

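/*
 * Each MSI controller bank serves 32 vectors through a group of three
 * 32-bit registers (ENABLE/MASK/STATUS), which is why bank i sits at a
 * 12-byte offset from bank 0 in the accesses below.
 */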
/* MSI interrupt handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
        u32 status;
        unsigned long val;
        int i, pos, irq;
        irqreturn_t ret = IRQ_NONE;

        for (i = 0; i < MAX_MSI_CTRLS; i++) {
                dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
                                    &status);
                /* go through a u32 so the upper bits of val are never stale */
                val = status;
                if (val) {
                        ret = IRQ_HANDLED;
                        pos = 0;
                        while ((pos = find_next_bit(&val, 32, pos)) != 32) {
                                irq = irq_find_mapping(pp->irq_domain,
                                                       i * 32 + pos);
                                generic_handle_irq(irq);
                                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
                                                    i * 12, 4, 1 << pos);
                                pos++;
                        }
                }
        }

        return ret;
}

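/*
 * The page allocated below is never actually written: its physical address
 * merely gives the root complex a bus address to decode as the MSI target,
 * so inbound MSI writes raise interrupts instead of reaching memory.
 */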
void dw_pcie_msi_init(struct pcie_port *pp)
{
        u64 msi_target;

        pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
        msi_target = virt_to_phys((void *)pp->msi_data);

        /* program the msi_data */
        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
                            (u32)(msi_target & 0xffffffff));
        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
                            (u32)(msi_target >> 32 & 0xffffffff));
}

static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
        unsigned int res, bit, val;

        res = (irq / 32) * 12;
        bit = irq % 32;
        dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
        val &= ~(1 << bit);
        dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
                            unsigned int nvec, unsigned int pos)
{
        unsigned int i;

        for (i = 0; i < nvec; i++) {
                irq_set_msi_desc_off(irq_base, i, NULL);
                /* Disable corresponding interrupt on MSI controller */
                if (pp->ops->msi_clear_irq)
                        pp->ops->msi_clear_irq(pp, pos + i);
                else
                        dw_pcie_msi_clear_irq(pp, pos + i);
        }

        bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
        unsigned int res, bit, val;

        res = (irq / 32) * 12;
        bit = irq % 32;
        dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
        val |= 1 << bit;
        dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

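/*
 * Multi-message MSI only permits power-of-two vector counts with naturally
 * aligned data values, hence the order_base_2() bitmap region below and the
 * log2 value stored in desc->msi_attrib.multiple.
 */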
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
        int irq, pos0, i;
        struct pcie_port *pp;

        pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
        pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
                                       order_base_2(no_irqs));
        if (pos0 < 0)
                goto no_valid_irq;

        irq = irq_find_mapping(pp->irq_domain, pos0);
        if (!irq)
                goto no_valid_irq;

        /*
         * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
         * descs so there is no need to allocate descs here. We can therefore
         * assume that if irq_find_mapping above returns non-zero, then the
         * descs are also successfully allocated.
         */
        for (i = 0; i < no_irqs; i++) {
                if (irq_set_msi_desc_off(irq, i, desc) != 0) {
                        clear_irq_range(pp, irq, i, pos0);
                        goto no_valid_irq;
                }
                /* Enable corresponding interrupt in MSI interrupt controller */
                if (pp->ops->msi_set_irq)
                        pp->ops->msi_set_irq(pp, pos0 + i);
                else
                        dw_pcie_msi_set_irq(pp, pos0 + i);
        }

        *pos = pos0;
        desc->nvec_used = no_irqs;
        desc->msi_attrib.multiple = order_base_2(no_irqs);

        return irq;

no_valid_irq:
        *pos = pos0;
        return -ENOSPC;
}

static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
        struct msi_msg msg;
        u64 msi_target;

        if (pp->ops->get_msi_addr)
                msi_target = pp->ops->get_msi_addr(pp);
        else
                msi_target = virt_to_phys((void *)pp->msi_data);

        msg.address_lo = (u32)(msi_target & 0xffffffff);
        msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

        if (pp->ops->get_msi_data)
                msg.data = pp->ops->get_msi_data(pp, pos);
        else
                msg.data = pos;

        pci_write_msi_msg(irq, &msg);
}

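/*
 * Legacy struct msi_controller entry points: the PCI core invokes
 * setup_irq() once per descriptor (setup_irqs() for multi-vector MSI) and
 * teardown_irq() per vector on release.  MSI-X is rejected in both paths.
 */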
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
                            struct msi_desc *desc)
{
        int irq, pos;
        struct pcie_port *pp = pdev->bus->sysdata;

        if (desc->msi_attrib.is_msix)
                return -EINVAL;

        irq = assign_irq(1, desc, &pos);
        if (irq < 0)
                return irq;

        dw_msi_setup_msg(pp, irq, pos);

        return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
                             int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
        int irq, pos;
        struct msi_desc *desc;
        struct pcie_port *pp = pdev->bus->sysdata;

        /* MSI-X interrupts are not supported */
        if (type == PCI_CAP_ID_MSIX)
                return -EINVAL;

        WARN_ON(!list_is_singular(&pdev->dev.msi_list));
        desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

        irq = assign_irq(nvec, desc, &pos);
        if (irq < 0)
                return irq;

        dw_msi_setup_msg(pp, irq, pos);

        return 0;
#else
        return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        struct msi_desc *msi = irq_data_get_msi_desc(data);
        struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);

        clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
        .setup_irq = dw_msi_setup_irq,
        .setup_irqs = dw_msi_setup_irqs,
        .teardown_irq = dw_msi_teardown_irq,
};

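/* Trivial linear domain: hwirq n is MSI vector n, handled as a simple IRQ. */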
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
                           irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);

        return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
        .map = dw_pcie_msi_map,
};

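/*
 * dw_pcie_host_init() consumes the platform's DT description: the "config"
 * region is split evenly between CFG0 (the bus directly below the root
 * port) and CFG1 (buses further downstream), the DBI and config windows
 * are mapped, the MSI irq domain is created, and the root bus is scanned,
 * sized, and enabled.
 */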
int dw_pcie_host_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;
        struct platform_device *pdev = to_platform_device(dev);
        struct pci_bus *bus, *child;
        struct resource *cfg_res;
        int i, ret;
        LIST_HEAD(res);
        struct resource_entry *win, *tmp;

        cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
        if (cfg_res) {
                pp->cfg0_size = resource_size(cfg_res) / 2;
                pp->cfg1_size = resource_size(cfg_res) / 2;
                pp->cfg0_base = cfg_res->start;
                pp->cfg1_base = cfg_res->start + pp->cfg0_size;
        } else if (!pp->va_cfg0_base) {
                dev_err(dev, "missing *config* reg space\n");
        }

        ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
        if (ret)
                return ret;

        ret = devm_request_pci_bus_resources(dev, &res);
        if (ret)
                goto error;

        /* Get the I/O and memory ranges from DT */
        resource_list_for_each_entry_safe(win, tmp, &res) {
                switch (resource_type(win->res)) {
                case IORESOURCE_IO:
                        ret = pci_remap_iospace(win->res, pp->io_base);
                        if (ret) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         ret, win->res);
                                resource_list_destroy_entry(win);
                        } else {
                                pp->io = win->res;
                                pp->io->name = "I/O";
                                pp->io_size = resource_size(pp->io);
                                pp->io_bus_addr = pp->io->start - win->offset;
                        }
                        break;
                case IORESOURCE_MEM:
                        pp->mem = win->res;
                        pp->mem->name = "MEM";
                        pp->mem_size = resource_size(pp->mem);
                        pp->mem_bus_addr = pp->mem->start - win->offset;
                        break;
                case 0: /* untyped entry: the DT-described config space */
                        pp->cfg = win->res;
                        pp->cfg0_size = resource_size(pp->cfg) / 2;
                        pp->cfg1_size = resource_size(pp->cfg) / 2;
                        pp->cfg0_base = pp->cfg->start;
                        pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
                        break;
                case IORESOURCE_BUS:
                        pp->busn = win->res;
                        break;
                }
        }

        if (!pci->dbi_base) {
                pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
                                             resource_size(pp->cfg));
                if (!pci->dbi_base) {
                        dev_err(dev, "failed to map DBI registers\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        pp->mem_base = pp->mem->start;

        if (!pp->va_cfg0_base) {
                pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
                                                pp->cfg0_size);
                if (!pp->va_cfg0_base) {
                        dev_err(dev, "failed to map cfg0 space\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        if (!pp->va_cfg1_base) {
                pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
                                                pp->cfg1_size);
                if (!pp->va_cfg1_base) {
                        dev_err(dev, "failed to map cfg1 space\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
        if (ret)
                pci->num_viewport = 2;

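        /*
         * With only two viewports, one ATU window must be shared between
         * config and I/O transactions; dw_pcie_rd/wr_other_conf() restore
         * the I/O mapping after each config access in that case.
         */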
        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                if (!pp->ops->msi_host_init) {
                        pp->irq_domain = irq_domain_add_linear(dev->of_node,
                                                MAX_MSI_IRQS, &msi_domain_ops,
                                                &dw_pcie_msi_chip);
                        if (!pp->irq_domain) {
                                dev_err(dev, "irq domain init failed\n");
                                ret = -ENXIO;
                                goto error;
                        }

                        for (i = 0; i < MAX_MSI_IRQS; i++)
                                irq_create_mapping(pp->irq_domain, i);
                } else {
                        ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
                        if (ret < 0)
                                goto error;
                }
        }

        if (pp->ops->host_init)
                pp->ops->host_init(pp);

        pp->root_bus_nr = pp->busn->start;

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
                                            &dw_pcie_ops, pp, &res,
                                            &dw_pcie_msi_chip);
                dw_pcie_msi_chip.dev = dev;
        } else
                bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
                                        pp, &res);

        if (!bus) {
                ret = -ENOMEM;
                goto error;
        }

        if (pp->ops->scan_bus)
                pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
        /* support old dtbs that incorrectly describe IRQs */
        pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

        pci_bus_size_bridges(bus);
        pci_bus_assign_resources(bus);

        list_for_each_entry(child, &bus->children, node)
                pcie_bus_configure_settings(child);

        pci_bus_add_devices(bus);

        return 0;

error:
        pci_free_resource_list(&res);
        return ret;
}

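/*
 * A minimal sketch of how a platform driver is expected to hook in,
 * assuming the dw_pcie_host_ops layout from pcie-designware.h (the foo_*
 * names are hypothetical):
 *
 *	static const struct dw_pcie_host_ops foo_pcie_host_ops = {
 *		.host_init = foo_pcie_host_init,
 *	};
 *
 *	pp->ops = &foo_pcie_host_ops;
 *	ret = dw_pcie_host_init(pp);
 *	if (ret)
 *		dev_err(dev, "failed to initialize host\n");
 */
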
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                                 u32 devfn, int where, int size, u32 *val)
{
        int ret, type;
        u32 busdev, cfg_size;
        u64 cpu_addr;
        void __iomem *va_cfg_base;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (pp->ops->rd_other_conf)
                return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (bus->parent->number == pp->root_bus_nr) {
                type = PCIE_ATU_TYPE_CFG0;
                cpu_addr = pp->cfg0_base;
                cfg_size = pp->cfg0_size;
                va_cfg_base = pp->va_cfg0_base;
        } else {
                type = PCIE_ATU_TYPE_CFG1;
                cpu_addr = pp->cfg1_base;
                cfg_size = pp->cfg1_size;
                va_cfg_base = pp->va_cfg1_base;
        }

        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                  type, cpu_addr,
                                  busdev, cfg_size);
        ret = dw_pcie_read(va_cfg_base + where, size, val);
        if (pci->num_viewport <= 2)
                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_IO, pp->io_base,
                                          pp->io_bus_addr, pp->io_size);

        return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                                 u32 devfn, int where, int size, u32 val)
{
        int ret, type;
        u32 busdev, cfg_size;
        u64 cpu_addr;
        void __iomem *va_cfg_base;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (pp->ops->wr_other_conf)
                return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (bus->parent->number == pp->root_bus_nr) {
                type = PCIE_ATU_TYPE_CFG0;
                cpu_addr = pp->cfg0_base;
                cfg_size = pp->cfg0_size;
                va_cfg_base = pp->va_cfg0_base;
        } else {
                type = PCIE_ATU_TYPE_CFG1;
                cpu_addr = pp->cfg1_base;
                cfg_size = pp->cfg1_size;
                va_cfg_base = pp->va_cfg1_base;
        }

        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                  type, cpu_addr,
                                  busdev, cfg_size);
        ret = dw_pcie_write(va_cfg_base + where, size, val);
        if (pci->num_viewport <= 2)
                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_IO, pp->io_base,
                                          pp->io_bus_addr, pp->io_size);

        return ret;
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
                                int dev)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        /* If there is no link, then there is no device */
        if (bus->number != pp->root_bus_nr) {
                if (!dw_pcie_link_up(pci))
                        return 0;
        }

        /* access only one slot on each root port */
        if (bus->number == pp->root_bus_nr && dev > 0)
                return 0;

        return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
                           int size, u32 *val)
{
        struct pcie_port *pp = bus->sysdata;

        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
                *val = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        if (bus->number == pp->root_bus_nr)
                return dw_pcie_rd_own_conf(pp, where, size, val);

        return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                           int where, int size, u32 val)
{
        struct pcie_port *pp = bus->sysdata;

        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (bus->number == pp->root_bus_nr)
                return dw_pcie_wr_own_conf(pp, where, size, val);

        return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
        .read = dw_pcie_rd_conf,
        .write = dw_pcie_wr_conf,
};

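/*
 * On cores with the iATU "unroll" feature the shared viewport register no
 * longer exists and reads back as all ones; that is used here as the
 * detection heuristic.
 */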
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
        if (val == 0xffffffff)
                return 1;

        return 0;
}

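/*
 * Program the root complex: RC BARs, legacy interrupt pin, bus numbers,
 * command register, default ATU windows, and the bridge class code.
 * Setting PORT_LOGIC_SPEED_CHANGE at the end directs the core to retrain
 * toward the highest mutually supported link speed.
 */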
void dw_pcie_setup_rc(struct pcie_port *pp)
{
        u32 val;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        dw_pcie_setup(pci);

        /* setup RC BARs: BAR0 as a 64-bit non-prefetchable memory BAR */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

        /* setup interrupt pins: report INTA in PCI_INTERRUPT_PIN */
        val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
        val &= 0xffff00ff;
        val |= 0x00000100;
        dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

        /* setup bus numbers: primary 0, secondary 1, subordinate 1 */
        val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
        val &= 0xff000000;
        val |= 0x00010100;
        dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

        /* setup command register */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val &= 0xffff0000;
        val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        /*
         * If the platform provides ->rd_other_conf, it means the platform
         * uses its own address translation component rather than ATU, so
         * we should not program the ATU here.
         */
        if (!pp->ops->rd_other_conf) {
                pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
                dev_dbg(pci->dev, "iATU unroll: %s\n",
                        pci->iatu_unroll_enabled ? "enabled" : "disabled");
                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
                                          pp->mem_bus_addr, pp->mem_size);
                if (pci->num_viewport > 2)
                        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
                                                  PCIE_ATU_TYPE_IO, pp->io_base,
                                                  pp->io_bus_addr, pp->io_size);
        }

        dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

        /* program correct class for RC */
        dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

        dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
        val |= PORT_LOGIC_SPEED_CHANGE;
        dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}