events_base.c

/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
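
/*
 * Illustrative sketch (not part of the original file): a driver normally
 * goes through the bind_*_to_irqhandler() helpers defined below, which map
 * an event source to a Linux irq and install a handler in one call.  The
 * handler name and dev_id here are made up for the example.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_handler,
 *				      IRQF_PERCPU, "my-timer", NULL);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, NULL);
 */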
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)
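
/*
 * Worked example of the two-level mapping above (assuming a 4096-byte
 * PAGE_SIZE and 4-byte int, so EVTCHN_PER_ROW == 1024): event channel 3000
 * is stored at evtchn_to_irq[2][952], since 3000 / 1024 == 2 and
 * 3000 % 1024 == 952.  Each row is a separately allocated page that is
 * filled with -1 on first use.
 */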

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
	unsigned int evtchn;

	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
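
/*
 * Illustrative use (hypothetical caller, not from this file): a frontend
 * that has bound "irq" to its ring's event channel kicks the backend after
 * queueing requests, e.g.:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */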

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		int masked = test_and_set_mask(evtchn);

		clear_evtchn(evtchn);

		irq_move_masked_irq(data);

		if (!masked)
			unmask_evtchn(evtchn);
	} else
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0);

	rc = xen_evtchn_port_setup(info);
	if (rc)
		goto err;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
				      shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	for (; i >= 0; i--)
		__unbind_from_irq(irq + i);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * If trying to remove a vector in a MSI group different
	 * than the first one skip the PIRQ unmap unless this vector
	 * is the first one in the group.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;
	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
				   unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
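
/*
 * Illustrative sketch (hypothetical driver code): bind_evtchn_to_irqhandler()
 * combines bind_evtchn_to_irq() with request_irq(), so teardown is a single
 * unbind_from_irqhandler() call.
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0,
 *					"my-device", my_priv);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_priv);
 */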

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq:irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);

	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
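
/*
 * Refcounting sketch (caller names are hypothetical): evtchn_make_refcounted()
 * flips an irq's refcnt from -1 (not refcounted) to 1; evtchn_get() and
 * evtchn_put() then take and drop references, and the final put unbinds the
 * irq via unbind_from_irq().
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0, "dev", priv);
 *	evtchn_make_refcounted(evtchn);
 *	...
 *	if (evtchn_get(evtchn) == 0) {
 *		... use the channel ...
 *		evtchn_put(evtchn);
 *	}
 */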

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
					    NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
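
/*
 * The per-cpu counter above defers re-entrant upcalls: if an upcall arrives
 * on a CPU that is already handling one, the nested invocation only bumps
 * xed_nesting_count and returns, and the outer loop in
 * __xen_evtchn_do_upcall() notices the non-1 count (or a re-raised
 * evtchn_upcall_pending) and scans the pending events again before leaving.
 */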

static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = get_cpu();
	unsigned count;

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		xen_evtchn_handle_events(cpu);

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
	inc_irq_stat(irq_hv_callback_count);
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu);
	/* This will be deferred until interrupt is processed */
	irq_set_affinity(irq, cpumask_of(info->cpu));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		int masked = test_and_set_mask(evtchn);

		clear_evtchn(evtchn);

		irq_move_masked_irq(data);

		if (!masked)
			unmask_evtchn(evtchn);
	} else
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq(data->irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	masked = test_and_set_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_mask_all();
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name = "xen-dyn",

	.irq_disable = disable_dynirq,
	.irq_mask = disable_dynirq,
	.irq_unmask = enable_dynirq,

	.irq_ack = ack_dynirq,
	.irq_mask_ack = mask_ack_dynirq,

	.irq_set_affinity = set_affinity_irq,
	.irq_retrigger = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name = "xen-pirq",

	.irq_startup = startup_pirq,
	.irq_shutdown = shutdown_pirq,
	.irq_enable = enable_pirq,
	.irq_disable = disable_pirq,

	.irq_mask = disable_dynirq,
	.irq_unmask = enable_dynirq,

	.irq_ack = eoi_pirq,
	.irq_eoi = eoi_pirq,
	.irq_mask_ack = mask_ack_pirq,

	.irq_set_affinity = set_affinity_irq,

	.irq_retrigger = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name = "xen-percpu",

	.irq_disable = disable_dynirq,
	.irq_mask = disable_dynirq,
	.irq_unmask = enable_dynirq,

	.irq_ack = ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);

void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	xen_evtchn_mask_all();

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		/* TODO: No PVH support for PIRQ EOI */
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}