vfio_pci.c

/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION	"0.2"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
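
/*
 * Example (a sketch; the IDs below are illustrative, not recommendations):
 * pre-loading the driver with two device types using the format above:
 *
 *	modprobe vfio-pci ids=8086:10fb,1022:145f
 *
 * IDs can also be added after load through the standard PCI driver sysfs
 * interface, which takes space-separated fields rather than colons:
 *
 *	echo "8086 10fb" > /sys/bus/pci/drivers/vfio-pci/new_id
 */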

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		 "Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself. However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size. But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the page, in case a hot-added device's BAR
			 * is later assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
					     &dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
				 &vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		/*
		 * We don't handle BARs that are not page aligned: there is
		 * no guarantee the BAR lands at the same offset within a
		 * page in the guest when passed through, and userspace has
		 * no way to learn the BAR's offset within the page, so it
		 * could not access such a BAR reliably anyway.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via
 * PCI_STATUS. If a device implements the former but not the latter we would
 * typically expect broken_intx_masking to be set and require an exclusive
 * interrupt. However, since we do have control of the device's ability to
 * assert INTx, we can instead pretend that the device does not implement
 * INTx, virtualizing the pin register to report zero and keeping DisINTx
 * set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710) 10/20/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x1589:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	vdev->reset_works = (pci_reset_function(pdev) == 0);
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			vfio_pci_disable(vdev);
			return ret;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it. If we can reset the device,
	 * even better. Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset. Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device. The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
			return 1;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
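
/*
 * Worked example for the interrupt counts above (values illustrative):
 * for MSI, PCI_MSI_FLAGS_QMASK (0x000e) selects the Multiple Message
 * Capable field, bits 3:1 of Message Control, which encodes log2 of the
 * supported vector count. With flags == 0x008a, (0x008a & 0x000e) >> 1
 * is 5, so the device reports 1 << 5 == 32 MSI vectors. For MSI-X,
 * QSIZE encodes the table size minus one, so flags == 0x0007 reports
 * 8 vectors.
 */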

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
				struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header *header;
	struct vfio_region_info_cap_sparse_mmap *sparse;
	size_t end, size;
	int nr_areas = 2, i = 0;

	end = pci_resource_len(vdev->pdev, vdev->msix_bar);

	/* If MSI-X table is aligned to the start or end, only one area */
	if (((vdev->msix_offset & PAGE_MASK) == 0) ||
	    (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
		nr_areas = 1;

	size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));

	header = vfio_info_cap_add(caps, size,
				   VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	sparse = container_of(header,
			      struct vfio_region_info_cap_sparse_mmap, header);
	sparse->nr_areas = nr_areas;

	if (vdev->msix_offset & PAGE_MASK) {
		sparse->areas[i].offset = 0;
		sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
		i++;
	}

	if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
		sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
						     vdev->msix_size);
		sparse->areas[i].size = end - sparse->areas[i].offset;
		i++;
	}

	return 0;
}
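
/*
 * Userspace sketch (assumptions: a vfio_region_info buffer "info" already
 * fetched with VFIO_REGION_INFO_FLAG_CAPS set and argsz large enough to
 * hold the chain; error handling elided): walking the capability chain
 * built above to find the sparse mmap areas. cap_offset and each header's
 * next field are offsets from the start of the info buffer.
 *
 *	struct vfio_info_cap_header *hdr;
 *	__u32 off = info->cap_offset;
 *
 *	while (off) {
 *		hdr = (void *)((char *)info + off);
 *		if (hdr->id == VFIO_REGION_INFO_CAP_SPARSE_MMAP) {
 *			struct vfio_region_info_cap_sparse_mmap *sparse =
 *				(void *)hdr;
 *			for (__u32 i = 0; i < sparse->nr_areas; i++)
 *				printf("area %u: offset 0x%llx size 0x%llx\n",
 *				       i,
 *				       (unsigned long long)sparse->areas[i].offset,
 *				       (unsigned long long)sparse->areas[i].size);
 *		}
 *		off = hdr->next;
 *	}
 */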

static int region_type_cap(struct vfio_pci_device *vdev,
			   struct vfio_info_cap *caps,
			   unsigned int type, unsigned int subtype)
{
	struct vfio_info_cap_header *header;
	struct vfio_region_info_cap_type *cap;

	header = vfio_info_cap_add(caps, sizeof(*cap),
				   VFIO_REGION_INFO_CAP_TYPE, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	cap = container_of(header, struct vfio_region_info_cap_type, header);
	cap->type = type;
	cap->subtype = subtype;

	return 0;
}

int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
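
/*
 * Caller sketch (hypothetical names; modeled on how vfio_pci_igd_init()
 * uses this helper): a sub-driver registers a read-only device-specific
 * region by supplying its own vfio_pci_regops and a type/subtype pair
 * from the uapi:
 *
 *	static const struct vfio_pci_regops my_regops = {
 *		.rw	 = my_region_rw,	// hypothetical handlers
 *		.release = my_region_release,
 *	};
 *
 *	ret = vfio_pci_register_dev_region(vdev,
 *		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
 *		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
 *		&my_regops, size, VFIO_REGION_INFO_FLAG_READ, data);
 *
 * The new region is reported to userspace at index VFIO_PCI_NUM_REGIONS + n
 * and serviced through the supplied regops.
 */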

static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_sparse_mmap_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}
			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size) {
				info.size = 0;
				break;
			}
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			ret = region_type_cap(vdev, &caps,
					      vdev->region[i].type,
					      vdev->region[i].subtype);
			if (ret)
				return ret;
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						 sizeof(info), caps.buf,
						 caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* pass thru to return error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		size_t size;
		u8 *data = NULL;
		int max, ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
		    hdr.count >= (U32_MAX - hdr.start) ||
		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		max = vfio_pci_get_irq_count(vdev, hdr.index);
		if (hdr.start >= max || hdr.start + hdr.count > max)
			return -EINVAL;

		switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
		case VFIO_IRQ_SET_DATA_NONE:
			size = 0;
			break;
		case VFIO_IRQ_SET_DATA_BOOL:
			size = sizeof(uint8_t);
			break;
		case VFIO_IRQ_SET_DATA_EVENTFD:
			size = sizeof(int32_t);
			break;
		default:
			return -EINVAL;
		}

		if (size) {
			if (hdr.argsz - minsz < hdr.count * size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz),
					   hdr.count * size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max. If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be. Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID. This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);

			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
				     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}
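
/*
 * Userspace sketch for the ioctls above (assumptions: "device_fd" is a
 * device fd obtained via VFIO_GROUP_GET_DEVICE_FD; error handling and the
 * <linux/vfio.h> and <sys/ioctl.h> includes are elided):
 *
 *	struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
 *	struct vfio_region_info reg = { .argsz = sizeof(reg) };
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &dev_info);
 *
 *	reg.index = VFIO_PCI_BAR0_REGION_INDEX;
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *	// reg.size/reg.offset now describe BAR0 for pread/pwrite/mmap.
 *	// If the kernel grew reg.argsz, the region carries capabilities
 *	// (e.g. sparse mmap); re-issue the ioctl with a larger buffer.
 */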

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);

	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly. We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real. Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
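
/*
 * Userspace sketch (assumptions as in the ioctl example above; the region
 * info for a mappable BAR has already been fetched into "reg"): the file
 * offset encodes the region index in its high bits, which vfio_pci_mmap()
 * decodes via VFIO_PCI_OFFSET_TO_INDEX, so userspace simply mmaps at
 * reg.offset:
 *
 *	void *bar = mmap(NULL, reg.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, device_fd, reg.offset);
 *	if (bar == MAP_FAILED)
 *		;	// fall back to pread()/pwrite() at reg.offset
 */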

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver. The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3. Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt). Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts. We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport. Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);