pcie-iproc-msi.c

/*
 * Copyright (C) 2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"
#define IPROC_MSI_INTR_EN_SHIFT        11
#define IPROC_MSI_INTR_EN              BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT    1
#define IPROC_MSI_INT_N_EVENT          BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT          0
#define IPROC_MSI_EQ_EN                BIT(IPROC_MSI_EQ_EN_SHIFT)
#define IPROC_MSI_EQ_MASK              0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS                     6

/* Number of entries in each event queue */
#define EQ_LEN                         64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE             SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE            SZ_4K
enum iproc_msi_reg {
        IPROC_MSI_EQ_PAGE = 0,
        IPROC_MSI_EQ_PAGE_UPPER,
        IPROC_MSI_PAGE,
        IPROC_MSI_PAGE_UPPER,
        IPROC_MSI_CTRL,
        IPROC_MSI_EQ_HEAD,
        IPROC_MSI_EQ_TAIL,
        IPROC_MSI_INTS_EN,
        IPROC_MSI_REG_SIZE,
};
struct iproc_msi;

/**
 * iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: Event queue number
 */
struct iproc_msi_grp {
        struct iproc_msi *msi;
        int gic_irq;
        unsigned int eq;
};
/**
 * iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 * set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 * queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 * posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
        struct iproc_pcie *pcie;
        const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
        struct iproc_msi_grp *grps;
        int nr_irqs;
        int nr_cpus;
        bool has_inten_reg;
        unsigned long *bitmap;
        struct mutex bitmap_lock;
        unsigned int nr_msi_vecs;
        struct irq_domain *inner_domain;
        struct irq_domain *msi_domain;
        unsigned int nr_eq_region;
        unsigned int nr_msi_region;
        void *eq_cpu;
        dma_addr_t eq_dma;
        phys_addr_t msi_addr;
};
static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
        { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
        { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
        { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};
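
/*
 * Note the layout difference between the two tables above: every PAXB
 * group shares the same EQ/MSI page registers (0x200/0x2c0/0x204/0x2c4)
 * and interrupt enable register (0x208), differing only in its
 * CTRL/HEAD/TAIL offsets, while each PAXC group has a fully separate
 * register set and no IPROC_MSI_INTS_EN offset at all (the trailing
 * entry is implicitly zero, consistent with has_inten_reg being needed
 * only on some legacy platforms).
 */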
static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
                                     enum iproc_msi_reg reg,
                                     unsigned int eq)
{
        struct iproc_pcie *pcie = msi->pcie;

        return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
                                       enum iproc_msi_reg reg,
                                       int eq, u32 val)
{
        struct iproc_pcie *pcie = msi->pcie;

        writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_irqs);
}

static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
                                                 unsigned long hwirq)
{
        if (msi->nr_msi_region > 1)
                return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
        else
                return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
        if (msi->nr_eq_region > 1)
                return eq * EQ_MEM_REGION_SIZE;
        else
                return eq * EQ_LEN * sizeof(u32);
}
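
/*
 * Worked example of the two addressing schemes above (values assumed for
 * illustration): with a single MSI region, posted writes for groups
 * 0/1/2 land at msi_addr + 0x0/0x4/0x8, i.e. packed u32 slots within one
 * 4K page. With one region per group, they land at msi_addr +
 * 0x0/0x1000/0x2000 instead. The same pattern applies to the event
 * queues: packed 64-entry (256-byte) queues vs. one 4K page per queue.
 */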
static struct irq_chip iproc_msi_irq_chip = {
        .name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                 MSI_FLAG_PCI_MSIX,
        .chip = &iproc_msi_irq_chip,
};
/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and
 * a dedicated event queue. Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs. The total
 * number of CPU cores also varies. To support MSI IRQ affinity, we
 * distribute the GIC interrupts across all available CPUs. An MSI vector is
 * moved from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */
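
/*
 * For example (numbers assumed for illustration), with M = 6 GIC
 * interrupts and N = 2 CPUs there are 6 * 64 = 384 raw vectors, of which
 * (6 * 64) / 2 = 192 can be handed out: each allocated vector reserves
 * one raw vector per CPU so it can be steered without reallocation.
 */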
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
                                                     unsigned long hwirq)
{
        return (hwirq - hwirq_to_cpu(msi, hwirq));
}

static int iproc_msi_irq_set_affinity(struct irq_data *data,
                                      const struct cpumask *mask, bool force)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        int target_cpu = cpumask_first(mask);
        int curr_cpu;

        curr_cpu = hwirq_to_cpu(msi, data->hwirq);
        if (curr_cpu == target_cpu)
                return IRQ_SET_MASK_OK_DONE;

        /* steer MSI to the target CPU */
        data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;

        return IRQ_SET_MASK_OK;
}
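
/*
 * Steering example (values assumed for illustration): with nr_cpus = 2,
 * hwirq 4 (canonical, CPU0) and hwirq 5 (CPU1) are the same logical MSI
 * vector. Moving it from CPU0 to CPU1 simply rewrites data->hwirq from 4
 * to 5; the subsequent irq_compose_msi_msg() below then encodes the new
 * hwirq in the MSI message, so the device's posted write is routed to a
 * different group/event queue and hence a different CPU.
 */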
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        dma_addr_t addr;

        addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
        msg->address_lo = lower_32_bits(addr);
        msg->address_hi = upper_32_bits(addr);
        msg->data = data->hwirq;
}

static struct irq_chip iproc_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_set_affinity = iproc_msi_irq_set_affinity,
        .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs,
                                      void *args)
{
        struct iproc_msi *msi = domain->host_data;
        int hwirq;

        mutex_lock(&msi->bitmap_lock);

        /* Allocate 'nr_cpus' MSI vectors each time */
        hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
                                           msi->nr_cpus, 0);
        if (hwirq < msi->nr_msi_vecs) {
                bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
        } else {
                mutex_unlock(&msi->bitmap_lock);
                return -ENOSPC;
        }

        mutex_unlock(&msi->bitmap_lock);

        irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip,
                            domain->host_data, handle_simple_irq, NULL, NULL);

        return 0;
}
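
/*
 * Allocation example (values assumed for illustration): with nr_cpus = 4,
 * each alloc above claims a run of 4 bits in the bitmap, e.g. hwirqs
 * 8..11 for one MSI. Because every allocation and free is exactly
 * nr_cpus bits, the runs stay naturally aligned to nr_cpus, so
 * hwirq_to_canonical_hwirq() can always recover the base hwirq (8) that
 * was registered with the IRQ core; the other three are reserved so that
 * iproc_msi_irq_set_affinity() can later switch among them for free.
 */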
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        unsigned int hwirq;

        mutex_lock(&msi->bitmap_lock);

        hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
        bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);

        mutex_unlock(&msi->bitmap_lock);

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc = iproc_msi_irq_domain_alloc,
        .free = iproc_msi_irq_domain_free,
};
static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
        u32 *msg, hwirq;
        unsigned int offs;

        offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
        msg = (u32 *)(msi->eq_cpu + offs);
        hwirq = *msg & IPROC_MSI_EQ_MASK;

        /*
         * Since we have multiple hwirqs mapped to a single MSI vector,
         * now we need to derive the hwirq at CPU0. It can then be used
         * to map back to the virq.
         */
        return hwirq_to_canonical_hwirq(msi, hwirq);
}
static void iproc_msi_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct iproc_msi_grp *grp;
        struct iproc_msi *msi;
        struct iproc_pcie *pcie;
        u32 eq, head, tail, nr_events;
        unsigned long hwirq;
        int virq;

        chained_irq_enter(chip, desc);

        grp = irq_desc_get_handler_data(desc);
        msi = grp->msi;
        pcie = msi->pcie;
        eq = grp->eq;

        /*
         * The iProc MSI event queue is tracked by head and tail pointers.
         * The head pointer indicates the next entry (MSI data) to be
         * consumed by SW in the queue and needs to be updated by SW. The
         * iProc MSI core uses the tail pointer as the next data insertion
         * point.
         *
         * Entries between the head and tail pointers contain valid MSI
         * data. MSI data is guaranteed to be in the event queue memory
         * before the tail pointer is updated by the iProc MSI core.
         */
        head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
                                  eq) & IPROC_MSI_EQ_MASK;
        do {
                tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
                                          eq) & IPROC_MSI_EQ_MASK;

                /*
                 * Figure out the total number of events (MSI data) to be
                 * processed.
                 */
                nr_events = (tail < head) ?
                        (EQ_LEN - (head - tail)) : (tail - head);
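                /*
                 * Wrap-around example (values assumed for illustration):
                 * with EQ_LEN = 64, head = 60 and tail = 4 gives
                 * nr_events = 64 - (60 - 4) = 8, i.e. entries 60..63 and
                 * 0..3. A tail equal to head yields 0 and terminates the
                 * loop below.
                 */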
                if (!nr_events)
                        break;

                /* process all outstanding events */
                while (nr_events--) {
                        hwirq = decode_msi_hwirq(msi, eq, head);
                        virq = irq_find_mapping(msi->inner_domain, hwirq);
                        generic_handle_irq(virq);

                        head++;
                        head %= EQ_LEN;
                }

                /*
                 * Now all outstanding events have been processed. Update
                 * the head pointer.
                 */
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

                /*
                 * Now go read the tail pointer again to see if there are
                 * new outstanding events that came in during the above
                 * window.
                 */
        } while (true);

        chained_irq_exit(chip, desc);
}
static void iproc_msi_enable(struct iproc_msi *msi)
{
        int i, eq;
        u32 val;

        /* Program memory region for each event queue */
        for (i = 0; i < msi->nr_eq_region; i++) {
                dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        /* Program address region for MSI posted writes */
        for (i = 0; i < msi->nr_msi_region; i++) {
                phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                /* Enable MSI event queue */
                val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                      IPROC_MSI_EQ_EN;
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

                /*
                 * Some legacy platforms require the MSI interrupt enable
                 * register to be set explicitly.
                 */
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val |= BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }
        }
}
static void iproc_msi_disable(struct iproc_msi *msi)
{
        u32 eq, val;

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val &= ~BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }

                val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
                val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                         IPROC_MSI_EQ_EN);
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
        }
}
static int iproc_msi_alloc_domains(struct device_node *node,
                                   struct iproc_msi *msi)
{
        msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
                                                  &msi_domain_ops, msi);
        if (!msi->inner_domain)
                return -ENOMEM;

        msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                                    &iproc_msi_domain_info,
                                                    msi->inner_domain);
        if (!msi->msi_domain) {
                irq_domain_remove(msi->inner_domain);
                return -ENOMEM;
        }

        return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
        if (msi->msi_domain)
                irq_domain_remove(msi->msi_domain);

        if (msi->inner_domain)
                irq_domain_remove(msi->inner_domain);
}
static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
        int i;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 NULL, NULL);
        }
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
        int i, ret;
        cpumask_var_t mask;
        struct iproc_pcie *pcie = msi->pcie;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 iproc_msi_handler,
                                                 &msi->grps[i]);
                /* Dedicate GIC interrupt to each CPU core */
                if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
                        if (ret)
                                dev_err(pcie->dev,
                                        "failed to set affinity for IRQ%d\n",
                                        msi->grps[i].gic_irq);
                        free_cpumask_var(mask);
                } else {
                        dev_err(pcie->dev, "failed to alloc CPU mask\n");
                        ret = -EINVAL;
                }

                if (ret) {
                        /* Free all configured/unconfigured IRQs */
                        iproc_msi_irq_free(msi, cpu);
                        return ret;
                }
        }

        return 0;
}
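
/*
 * iproc_msi_init() expects a DT node like the minimal sketch below. Only
 * the compatible string and the "msi-controller" / "brcm,pcie-msi-inten"
 * properties are checked in this file; the node name, unit address, and
 * GIC interrupt numbers are illustrative:
 *
 *      msi0: msi@18012000 {
 *              compatible = "brcm,iproc-msi";
 *              msi-controller;
 *              interrupt-parent = <&gic>;
 *              interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
 *                           <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 *              brcm,pcie-msi-inten;
 *      };
 *
 * One GIC interrupt is expected per event queue; of_irq_count() below
 * sizes nr_irqs from the "interrupts" property.
 */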
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
        struct iproc_msi *msi;
        int i, ret;
        unsigned int cpu;

        if (!of_device_is_compatible(node, "brcm,iproc-msi"))
                return -ENODEV;

        if (!of_find_property(node, "msi-controller", NULL))
                return -ENODEV;

        if (pcie->msi)
                return -EBUSY;

        msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
        if (!msi)
                return -ENOMEM;

        msi->pcie = pcie;
        pcie->msi = msi;
        msi->msi_addr = pcie->base_addr;
        mutex_init(&msi->bitmap_lock);
        msi->nr_cpus = num_possible_cpus();

        msi->nr_irqs = of_irq_count(node);
        if (!msi->nr_irqs) {
                dev_err(pcie->dev, "found no MSI GIC interrupt\n");
                return -ENODEV;
        }

        if (msi->nr_irqs > NR_HW_IRQS) {
                dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
                         msi->nr_irqs);
                msi->nr_irqs = NR_HW_IRQS;
        }

        if (msi->nr_irqs < msi->nr_cpus) {
                dev_err(pcie->dev,
                        "not enough GIC interrupts for MSI affinity\n");
                return -EINVAL;
        }

        if (msi->nr_irqs % msi->nr_cpus != 0) {
                msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
                dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
                         msi->nr_irqs);
        }

        switch (pcie->type) {
        case IPROC_PCIE_PAXB:
                msi->reg_offsets = iproc_msi_reg_paxb;
                msi->nr_eq_region = 1;
                msi->nr_msi_region = 1;
                break;
        case IPROC_PCIE_PAXC:
                msi->reg_offsets = iproc_msi_reg_paxc;
                msi->nr_eq_region = msi->nr_irqs;
                msi->nr_msi_region = msi->nr_irqs;
                break;
        default:
                dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
                return -EINVAL;
        }

        if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
                msi->has_inten_reg = true;

        msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
        msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
                                   sizeof(*msi->bitmap), GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
                                 GFP_KERNEL);
        if (!msi->grps)
                return -ENOMEM;

        for (i = 0; i < msi->nr_irqs; i++) {
                unsigned int irq = irq_of_parse_and_map(node, i);

                if (!irq) {
                        dev_err(pcie->dev, "unable to parse/map interrupt\n");
                        ret = -ENODEV;
                        goto free_irqs;
                }
                msi->grps[i].gic_irq = irq;
                msi->grps[i].msi = msi;
                msi->grps[i].eq = i;
        }
        /* Reserve memory for the event queues and make sure it is zeroed */
        msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
                                          msi->nr_eq_region *
                                          EQ_MEM_REGION_SIZE,
                                          &msi->eq_dma, GFP_KERNEL);
        if (!msi->eq_cpu) {
                ret = -ENOMEM;
                goto free_irqs;
        }

        ret = iproc_msi_alloc_domains(node, msi);
        if (ret) {
                dev_err(pcie->dev, "failed to create MSI domains\n");
                goto free_eq_dma;
        }

        for_each_online_cpu(cpu) {
                ret = iproc_msi_irq_setup(msi, cpu);
                if (ret)
                        goto free_msi_irq;
        }

        iproc_msi_enable(msi);

        return 0;

free_msi_irq:
        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);
        iproc_msi_free_domains(msi);

free_eq_dma:
        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

free_irqs:
        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
        pcie->msi = NULL;

        return ret;
}
EXPORT_SYMBOL(iproc_msi_init);
void iproc_msi_exit(struct iproc_pcie *pcie)
{
        struct iproc_msi *msi = pcie->msi;
        unsigned int i, cpu;

        if (!msi)
                return;

        iproc_msi_disable(msi);

        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);

        iproc_msi_free_domains(msi);

        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
}
EXPORT_SYMBOL(iproc_msi_exit);