/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

#if 0
#define ioapic_debug(fmt, arg...)	printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
			  bool line_status);

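/*
 * The I/O APIC exposes its registers indirectly: software writes a
 * register index to IOREGSEL and then accesses a 32-bit data window
 * (IOWIN).  Indices 0x10 and up map the 64-bit redirection table, two
 * 32-bit halves per pin:
 *
 *	pin        = (ioregsel - 0x10) >> 1
 *	high dword = ioregsel & 1
 *
 * e.g. ioregsel == 0x13 selects the high dword of pin 1's entry.
 */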
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
	{
		u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
		u64 redir_content;

		if (redir_index < IOAPIC_NUM_PINS)
			redir_content = ioapic->redirtbl[redir_index].bits;
		else
			redir_content = ~0ULL;

		result = (ioapic->ioregsel & 0x1) ?
		    (redir_content >> 32) & 0xffffffff :
		    redir_content & 0xffffffff;
		break;
	}
	}

	return result;
}

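/*
 * RTC EOI tracking.  The RTC interrupt (RTC_GSI) is edge-triggered, but
 * userspace wants to know whether each injected tick was actually EOI'd,
 * because coalesced ticks cause time drift in Windows guests.  rtc_status
 * records which vcpus an RTC interrupt was delivered to (dest_map) and
 * how many EOIs are still outstanding (pending_eoi).
 */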
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
				 e->fields.dest_mode))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(vcpu->vcpu_id,
			       ioapic->rtc_status.dest_map.map)) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

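/*
 * Assert or deassert an IRQ line.  The return value ends up in the
 * KVM_IRQ_LINE_STATUS ioctl's status field: 0 means the interrupt was
 * coalesced (dropped), anything else means it was accepted (a masked
 * pin shows up here as the -1 from ioapic_service).
 */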
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
			  int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge)
		ioapic->irr_delivered &= ~mask;
	if ((edge && old_irr == ioapic->irr) ||
	    (!edge && entry.fields.remote_irr)) {
		ret = 0;
		goto out;
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}

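/*
 * Collect, for @vcpu, the vectors whose EOI must be seen by the IOAPIC:
 * level-triggered pins (to clear remote_irr), pins with an ack notifier,
 * and the specially tracked RTC pin.  The caller uses the resulting
 * bitmap to force EOI exits for these vectors when APICv would otherwise
 * handle the EOI entirely in hardware.
 */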
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			if (kvm_apic_match_dest(vcpu, NULL, 0,
				     e->fields.dest_id, e->fields.dest_mode) ||
			    (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
			     kvm_apic_pending_eoi(vcpu, e->fields.vector)))
				__set_bit(e->fields.vector,
					  ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;
	kvm_make_scan_ioapic_request(kvm);
}

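/*
 * A write to the data window updates the register selected by IOREGSEL.
 * Writes to a redirection entry have side effects: the low dword write
 * clears remote_irr, flipping the mask bit fires the registered mask
 * notifiers, and a still-asserted level-triggered line is serviced
 * again right away.
 */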
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
			e->fields.remote_irr = 0;
		}
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
						index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		kvm_vcpu_request_scan_ioapic(ioapic->kvm);
		break;
	}
}

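/*
 * Deliver the interrupt programmed into redirection entry @irq to the
 * local APIC(s).  The delivery mode field is shifted left by 8 so that
 * it lines up with the APIC_DM_* values (ICR bits 8-10) that
 * kvm_irq_delivery_to_apic expects.
 */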
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask)
		return -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid), and the caller
		 * ensures that we are only called when it is zero, namely
		 * when rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
	spin_unlock(&ioapic->lock);

	return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}

static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

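/*
 * If a vector is EOI'd this many times while its level-triggered line
 * stays asserted, assume an interrupt storm from a guest that never
 * handles the interrupt, and fall back to the delayed re-injection
 * work above instead of re-injecting from the EOI path.
 */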
#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    vector == dest_map->vectors[vcpu->vcpu_id])
		rtc_irq_eoi(ioapic, vcpu);

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		/*
		 * We are dropping the lock while calling the ack notifiers
		 * because ack notifier callbacks for assigned devices call
		 * into the IOAPIC recursively.  Since remote_irr is cleared
		 * only after the notifiers return, if the same vector is
		 * delivered while the lock is dropped it is put into the irr
		 * and will be serviced after the ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
		    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
			++ioapic->irq_eoi[i];
			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
				/*
				 * Real hardware does not deliver the interrupt
				 * immediately during eoi broadcast, and this
				 * lets a buggy guest make slow progress
				 * even if it does not correctly handle a
				 * level-triggered interrupt.  Emulate this
				 * behavior if we detect an interrupt storm.
				 */
				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
				ioapic->irq_eoi[i] = 0;
				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
			} else {
				ioapic_service(ioapic, i, false);
			}
		} else {
			ioapic->irq_eoi[i] = 0;
		}
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return (addr >= ioapic->base_address &&
		addr < ioapic->base_address + IOAPIC_MEM_LENGTH);
}

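/*
 * MMIO handlers for the IOAPIC_MEM_LENGTH window at base_address
 * (IOAPIC_DEFAULT_BASE_ADDRESS, 0xfec00000, after reset).  Only
 * IOREGSEL (offset 0x00) and IOWIN (offset 0x10) are implemented;
 * other offsets read as 0 and writes to them are dropped.
 */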
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void *)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8 *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read	= ioapic_mmio_read,
	.write	= ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
		return ret;
	}

	kvm_vcpu_request_scan_ioapic(kvm);
	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

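/*
 * Save/restore for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP.  Edge-triggered
 * interrupts that have already been delivered are cleared from the saved
 * IRR (that is what irr_delivered tracks), so that restoring the state
 * does not inject them a second time.
 */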
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);

	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
	return 0;
}

int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);

	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_vcpu_request_scan_ioapic(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
	return 0;
}