vgic-its.c

/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}
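/*
 * The structures below shadow the guest's ITS tables in kernel memory:
 * an its_device holds the list of interrupt translation table entries
 * (ITTEs) for one device ID, each its_itte maps an event ID to an LPI
 * and points to an its_collection, and a collection names the target
 * redistributor (expressed as a VCPU number, since GITS_TYPER.PTA is 0).
 */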
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_itte {
	struct list_head itte_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 lpi;
	u32 event_id;
};

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_itte *find_itte(struct vgic_its *its, u32 device_id,
				  u32 event_id)
{
	struct its_device *device;
	struct its_itte *itte;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(itte, &device->itt_head, itte_list)
		if (itte->event_id == event_id)
			return itte;

	return NULL;
}
/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, itte, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(itte, &(dev)->itt_head, itte_list)

/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
#define PENDBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define PROPBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);

	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}

/*
 * Create a snapshot of the current LPI list, so that we can enumerate all
 * LPIs without holding any lock.
 * Returns the array length and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		intids[i] = irq->intid;
		if (++i == irq_count)
			break;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return irq_count;
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this
 * LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_itte(struct kvm *kvm, struct its_itte *itte)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(itte->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);

	spin_lock(&itte->irq->irq_lock);
	itte->irq->target_vcpu = vcpu;
	spin_unlock(&itte->irq->irq_lock);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_itte *itte;

	for_each_lpi_its(device, itte, its) {
		if (!itte->collection || coll != itte->collection)
			continue;

		update_affinity_itte(kvm, itte);
	}
}
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

/*
 * Scan the whole LPI pending table and sync the pending bit in there
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;
		u8 pendmask;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	its->enabled = !!(val & GITS_CTLR_ENABLE);
}

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct kvm_vcpu *vcpu;
	struct its_itte *itte;

	if (!its->enabled)
		return -EBUSY;

	itte = find_itte(its, devid, eventid);
	if (!itte || !its_is_collection_mapped(itte->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	spin_lock(&itte->irq->irq_lock);
	itte->irq->pending = true;
	vgic_queue_irq_unlock(kvm, itte->irq);

	return 0;
}
static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *iodev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device, dev);

	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;
	int ret;

	if (!vgic_has_its(kvm))
		return -ENODEV;

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return -EINVAL;

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return -EINVAL;

	iodev = vgic_get_its_iodev(kvm_io_dev);
	if (!iodev)
		return -EINVAL;

	mutex_lock(&iodev->its->its_lock);
	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
	mutex_unlock(&iodev->its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
/* Requires the its_lock to be held. */
static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
{
	list_del(&itte->itte_list);

	/* This put matches the get in vgic_add_lpi. */
	if (itte->irq)
		vgic_put_irq(kvm, itte->irq);

	kfree(itte);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}
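/*
 * Each ITS command is four little-endian 64-bit words (32 bytes, see
 * ITS_CMD_SIZE below); the macros below use its_cmd_mask_field() to pull
 * the individual command fields out of those words.
 */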
#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (itte && itte->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_itte(kvm, itte);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_itte *itte;
	struct its_collection *collection;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;
	if (!its_is_collection_mapped(itte->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	itte->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&itte->irq->irq_lock);
	itte->irq->target_vcpu = vcpu;
	spin_unlock(&itte->irq->irq_lock);

	return 0;
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	int index;
	u64 indirect_ptr;
	gfn_t gfn;

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
			return false;

		addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
		gfn = addr >> PAGE_SHIFT;

		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
	indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
	gfn = indirect_ptr >> PAGE_SHIFT;

	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_itte *itte;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, itte, its)
		if (itte->collection &&
		    itte->collection->collection_id == coll_id)
			itte->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_itte *itte;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	int lpi_nr;
	struct vgic_irq *irq;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_itte(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
	if (!itte) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return -ENOMEM;
	}

	itte->event_id = event_id;
	list_add_tail(&itte->itte_list, &device->itt_head);

	itte->collection = collection;
	itte->lpi = lpi_nr;

	irq = vgic_add_lpi(kvm, lpi_nr);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_itte(kvm, itte);
		return PTR_ERR(irq);
	}
	itte->irq = irq;

	update_affinity_itte(kvm, itte);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	update_lpi_config(kvm, itte->irq, NULL);

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_itte *itte, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(itte, temp, &device->itt_head, itte_list)
		its_free_itte(kvm, itte);

	list_del(&device->dev_list);
	kfree(device);
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id))
		return E_ITS_MAPD_DEVICE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->device_id = device_id;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);

	return 0;
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	itte->irq->pending = false;

	return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, itte->irq, NULL);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	gpa_t cbaser;
	u64 cmd_buf[4];
	u32 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}

	its->cwriter = reg;
	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}

	mutex_unlock(&its->cmd_lock);
}
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
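/*
 * The GITS_BASER<n> registers are 64 bits wide and laid out back to back,
 * so the register index is simply the byte offset divided by 8. Only two
 * of them are backed here: BASER0 describes the device table and BASER1
 * the collection table; the remaining ones read as zero and ignore writes.
 */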
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;

	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)	\
{							\
	.reg_offset = off,				\
	.len = length,					\
	.access_flags = acc,				\
	.its_read = rd,					\
	.its_write = wr,				\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	if (!its->initialized)
		return -EBUSY;

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
		return -ENXIO;

	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
#define INITIAL_BASER_VALUE \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | \
	 ((8ULL - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) | \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner) | \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.has_its = true;
	its->initialized = false;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return 0;
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct its_device *dev;
	struct its_itte *itte;
	struct list_head *dev_cur, *dev_temp;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
		dev = container_of(dev_cur, struct its_device, dev_list);
		list_for_each_safe(cur, temp, &dev->itt_head) {
			itte = container_of(cur, struct its_itte, itte_list);
			its_free_itte(kvm, itte);
		}
		list_del(dev_cur);
		kfree(dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		list_del(cur);
		kfree(container_of(cur, struct its_collection, coll_list));
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
		break;
	}
	return -ENXIO;
}

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		its->vgic_its_base = addr;

		return 0;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			its->initialized = true;

			return 0;
		}
		break;
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}

/*
 * Registers all ITSes with the kvm_io_bus framework.
 * To follow the existing VGIC initialization sequence, this has to be
 * done as late as possible, just before the first VCPU runs.
 */
int vgic_register_its_iodevs(struct kvm *kvm)
{
	struct kvm_device *dev;
	int ret = 0;

	list_for_each_entry(dev, &kvm->devices, vm_node) {
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		ret = vgic_register_its_iodev(kvm, dev->private);
		if (ret)
			return ret;
		/*
		 * We don't need to care about tearing down previously
		 * registered ITSes, as the kvm_io_bus framework removes
		 * them for us if the VM gets destroyed.
		 */
	}

	return ret;
}