virtio_balloon.c

/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 * Copyright 2008 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>

/*
 * Balloon device works in 4K page units.  So each page is pointed to by
 * multiple balloon pages.  All memory counters in this driver are in balloon
 * page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
#define OOM_VBALLOON_DEFAULT_PAGES 256
#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80

static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
module_param(oom_pages, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(oom_pages, "pages to free on OOM");

#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;
#endif

struct virtio_balloon {
        struct virtio_device *vdev;
        struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;

        /* The balloon servicing is delegated to a freezable workqueue. */
        struct work_struct update_balloon_stats_work;
        struct work_struct update_balloon_size_work;

        /* Prevent updating balloon when it is being canceled. */
        spinlock_t stop_update_lock;
        bool stop_update;

        /* Waiting for host to ack the pages we released. */
        wait_queue_head_t acked;

        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
        /*
         * The pages we've told the Host we're not using are enqueued
         * at vb_dev_info->pages list.
         * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
         * to num_pages above.
         */
        struct balloon_dev_info vb_dev_info;

        /* Synchronize access/update to this struct virtio_balloon's elements */
        struct mutex balloon_lock;

        /* The array of pfns we tell the Host about. */
        unsigned int num_pfns;
        __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

        /* Memory statistics */
        struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];

        /* To register callback in oom notifier call chain */
        struct notifier_block nb;
};

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);

        BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
        /* Convert pfn from Linux page size to balloon page size. */
        return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

static struct page *balloon_pfn_to_page(u32 pfn)
{
        BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
        return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
}

static void balloon_ack(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        wake_up(&vb->acked);
}

static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
        struct scatterlist sg;
        unsigned int len;

        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

        /* We should always be able to add one buffer to an empty queue. */
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);

        /* When host has read buffer, this completes via balloon_ack */
        wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}

static void set_page_pfns(struct virtio_balloon *vb,
                          __virtio32 pfns[], struct page *page)
{
        unsigned int i;

        /* Set balloon pfns pointing at this page.
         * Note that the first pfn points at start of the page. */
        for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
                pfns[i] = cpu_to_virtio32(vb->vdev,
                                          page_to_balloon_pfn(page) + i);
}

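/*
 * fill_balloon - inflate the balloon by up to @num balloon pages.
 *
 * Allocates guest pages via balloon_page_enqueue(), records their pfns in
 * vb->pfns and tells the host about them on the inflate virtqueue.
 * Returns the number of balloon pages actually added.
 */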
static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
        struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
        unsigned num_allocated_pages;

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        mutex_lock(&vb->balloon_lock);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                struct page *page = balloon_page_enqueue(vb_dev_info);

                if (!page) {
                        dev_info_ratelimited(&vb->vdev->dev,
                                             "Out of puff! Can't get %u pages\n",
                                             VIRTIO_BALLOON_PAGES_PER_PAGE);
                        /* Sleep for at least 1/5 of a second before retry. */
                        msleep(200);
                        break;
                }
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, -1);
        }

        num_allocated_pages = vb->num_pfns;
        /* Did we get any? */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->inflate_vq);
        mutex_unlock(&vb->balloon_lock);

        return num_allocated_pages;
}

static void release_pages_balloon(struct virtio_balloon *vb)
{
        unsigned int i;
        struct page *page;

        /* Find pfns pointing at start of each page, get pages and free them. */
        for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
                                                           vb->pfns[i]));
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, 1);
                put_page(page); /* balloon reference */
        }
}

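/*
 * leak_balloon - deflate the balloon by up to @num balloon pages.
 *
 * Dequeues ballooned pages, tells the host about them on the deflate
 * virtqueue and then releases them back to the guest.
 * Returns the number of balloon pages actually freed.
 */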
static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
{
        unsigned num_freed_pages;
        struct page *page;
        struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        mutex_lock(&vb->balloon_lock);
        /* We can't release more pages than taken */
        num = min(num, (size_t)vb->num_pages);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                page = balloon_page_dequeue(vb_dev_info);
                if (!page)
                        break;
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
        }

        num_freed_pages = vb->num_pfns;
        /*
         * Note that if
         * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
         * is true, we *have* to do it in this order
         */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->deflate_vq);
        release_pages_balloon(vb);
        mutex_unlock(&vb->balloon_lock);
        return num_freed_pages;
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
                               u16 tag, u64 val)
{
        BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
        vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
        vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

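/*
 * Refresh vb->stats with current guest memory statistics: swap in/out,
 * major/minor page faults, free, total and available memory.
 */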
static void update_balloon_stats(struct virtio_balloon *vb)
{
        unsigned long events[NR_VM_EVENT_ITEMS];
        struct sysinfo i;
        int idx = 0;
        long available;

        all_vm_events(events);
        si_meminfo(&i);

        available = si_mem_available();

        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
                    pages_to_bytes(events[PSWPIN]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
                    pages_to_bytes(events[PSWPOUT]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
                    pages_to_bytes(i.freeram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
                    pages_to_bytes(i.totalram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
                    pages_to_bytes(available));
}

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse.  The driver initializes the virtqueue
 * with a single buffer.  From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
 * we delegate the job to a freezable workqueue that will do the actual work via
 * stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        spin_lock(&vb->stop_update_lock);
        if (!vb->stop_update)
                queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
        spin_unlock(&vb->stop_update_lock);
}

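/*
 * Runs from the freezable workqueue: refresh the statistics, reclaim the
 * buffer the host just consumed and queue a fresh stats buffer for it.
 */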
static void stats_handle_request(struct virtio_balloon *vb)
{
        struct virtqueue *vq;
        struct scatterlist sg;
        unsigned int len;

        update_balloon_stats(vb);

        vq = vb->stats_vq;
        if (!virtqueue_get_buf(vq, &len))
                return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats));
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);
}

static void virtballoon_changed(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&vb->stop_update_lock, flags);
        if (!vb->stop_update)
                queue_work(system_freezable_wq, &vb->update_balloon_size_work);
        spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}

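/*
 * Read the host's requested balloon size from the config space and return
 * how many balloon pages are still to be inflated (positive) or deflated
 * (negative) to reach that target.
 */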
static inline s64 towards_target(struct virtio_balloon *vb)
{
        s64 target;
        u32 num_pages;

        virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
                     &num_pages);

        /* Legacy balloon config space is LE, unlike all other devices. */
        if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
                num_pages = le32_to_cpu((__force __le32)num_pages);

        target = num_pages;
        return target - vb->num_pages;
}

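/* Report the current balloon size back to the host via the "actual" config field. */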
static void update_balloon_size(struct virtio_balloon *vb)
{
        u32 actual = vb->num_pages;

        /* Legacy balloon config space is LE, unlike all other devices. */
        if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
                actual = (__force u32)cpu_to_le32(actual);

        virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
                      &actual);
}

/*
 * virtballoon_oom_notify - release pages when system is under severe
 *                          memory pressure (called from out_of_memory())
 * @self : notifier block struct
 * @dummy: not used
 * @parm : returned - number of freed pages
 *
 * The balancing of memory by use of the virtio balloon should not cause
 * the termination of processes while there are pages in the balloon.  If
 * virtio balloon manages to release some memory, it will make the system
 * return and retry the allocation that forced the OOM killer to run.
 */
static int virtballoon_oom_notify(struct notifier_block *self,
                                  unsigned long dummy, void *parm)
{
        struct virtio_balloon *vb;
        unsigned long *freed;
        unsigned num_freed_pages;

        vb = container_of(self, struct virtio_balloon, nb);
        if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                return NOTIFY_OK;

        freed = parm;
        num_freed_pages = leak_balloon(vb, oom_pages);
        update_balloon_size(vb);
        *freed += num_freed_pages;

        return NOTIFY_OK;
}

static void update_balloon_stats_func(struct work_struct *work)
{
        struct virtio_balloon *vb;

        vb = container_of(work, struct virtio_balloon,
                          update_balloon_stats_work);
        stats_handle_request(vb);
}

static void update_balloon_size_func(struct work_struct *work)
{
        struct virtio_balloon *vb;
        s64 diff;

        vb = container_of(work, struct virtio_balloon,
                          update_balloon_size_work);
        diff = towards_target(vb);

        if (diff > 0)
                diff -= fill_balloon(vb, diff);
        else if (diff < 0)
                diff += leak_balloon(vb, -diff);
        update_balloon_size(vb);

        if (diff)
                queue_work(system_freezable_wq, work);
}

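/*
 * Set up the inflate and deflate virtqueues, plus the stats virtqueue when
 * VIRTIO_BALLOON_F_STATS_VQ is negotiated.
 */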
static int init_vqs(struct virtio_balloon *vb)
{
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
        static const char * const names[] = { "inflate", "deflate", "stats" };
        int err, nvqs;

        /*
         * We expect two virtqueues: inflate and deflate, and
         * optionally stat.
         */
        nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
        err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names);
        if (err)
                return err;

        vb->inflate_vq = vqs[0];
        vb->deflate_vq = vqs[1];
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                struct scatterlist sg;
                vb->stats_vq = vqs[2];

                /*
                 * Prime this virtqueue with one buffer so the hypervisor can
                 * use it to signal us later (it can't be broken yet!).
                 */
                update_balloon_stats(vb);
                sg_init_one(&sg, vb->stats, sizeof(vb->stats));
                if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
                    < 0)
                        BUG();
                virtqueue_kick(vb->stats_vq);
        }
        return 0;
}

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *                           a compaction thread. (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through balloon_mapping->a_ops->migratepage
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        struct virtio_balloon *vb = container_of(vb_dev_info,
                        struct virtio_balloon, vb_dev_info);
        unsigned long flags;

        /*
         * In order to avoid lock contention while migrating pages concurrently
         * to leak_balloon() or fill_balloon() we just give up the balloon_lock
         * this turn, as it is easier to retry the page migration later.
         * This also prevents fill_balloon() getting stuck into a mutex
         * recursion in the case it ends up triggering memory compaction
         * while it is attempting to inflate the balloon.
         */
        if (!mutex_trylock(&vb->balloon_lock))
                return -EAGAIN;

        get_page(newpage); /* balloon reference */

        /* balloon's page migration 1st step -- inflate "newpage" */
        spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_insert(vb_dev_info, newpage);
        vb_dev_info->isolated_pages--;
        __count_vm_event(BALLOON_MIGRATE);
        spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, newpage);
        tell_host(vb, vb->inflate_vq);

        /* balloon's page migration 2nd step -- deflate "page" */
        balloon_page_delete(page);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, page);
        tell_host(vb, vb->deflate_vq);

        mutex_unlock(&vb->balloon_lock);

        put_page(page); /* balloon reference */

        return MIGRATEPAGE_SUCCESS;
}

static struct dentry *balloon_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        static const struct dentry_operations ops = {
                .d_dname = simple_dname,
        };

        return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops,
                            BALLOON_KVM_MAGIC);
}

static struct file_system_type balloon_fs = {
        .name = "balloon-kvm",
        .mount = balloon_mount,
        .kill_sb = kill_anon_super,
};

#endif /* CONFIG_BALLOON_COMPACTION */

static int virtballoon_probe(struct virtio_device *vdev)
{
        struct virtio_balloon *vb;
        int err;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
        if (!vb) {
                err = -ENOMEM;
                goto out;
        }

        INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
        INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
        spin_lock_init(&vb->stop_update_lock);
        vb->stop_update = false;
        vb->num_pages = 0;
        mutex_init(&vb->balloon_lock);
        init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;

        balloon_devinfo_init(&vb->vb_dev_info);

        err = init_vqs(vb);
        if (err)
                goto out_free_vb;

        vb->nb.notifier_call = virtballoon_oom_notify;
        vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
        err = register_oom_notifier(&vb->nb);
        if (err < 0)
                goto out_del_vqs;

#ifdef CONFIG_BALLOON_COMPACTION
        balloon_mnt = kern_mount(&balloon_fs);
        if (IS_ERR(balloon_mnt)) {
                err = PTR_ERR(balloon_mnt);
                unregister_oom_notifier(&vb->nb);
                goto out_del_vqs;
        }

        vb->vb_dev_info.migratepage = virtballoon_migratepage;
        vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
        if (IS_ERR(vb->vb_dev_info.inode)) {
                err = PTR_ERR(vb->vb_dev_info.inode);
                kern_unmount(balloon_mnt);
                unregister_oom_notifier(&vb->nb);
                vb->vb_dev_info.inode = NULL;
                goto out_del_vqs;
        }
        vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif

        virtio_device_ready(vdev);

        if (towards_target(vb))
                virtballoon_changed(vdev);
        return 0;

out_del_vqs:
        vdev->config->del_vqs(vdev);
out_free_vb:
        kfree(vb);
out:
        return err;
}

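/* Deflate the balloon fully, then reset the device and delete its virtqueues. */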
static void remove_common(struct virtio_balloon *vb)
{
        /* There might be pages left in the balloon: free them. */
        while (vb->num_pages)
                leak_balloon(vb, vb->num_pages);
        update_balloon_size(vb);

        /* Now we reset the device so we can clean up the queues. */
        vb->vdev->config->reset(vb->vdev);

        vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        unregister_oom_notifier(&vb->nb);

        spin_lock_irq(&vb->stop_update_lock);
        vb->stop_update = true;
        spin_unlock_irq(&vb->stop_update_lock);
        cancel_work_sync(&vb->update_balloon_size_work);
        cancel_work_sync(&vb->update_balloon_stats_work);

        remove_common(vb);
#ifdef CONFIG_BALLOON_COMPACTION
        if (vb->vb_dev_info.inode)
                iput(vb->vb_dev_info.inode);

        kern_unmount(balloon_mnt);
#endif
        kfree(vb);
}

#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        /*
         * The workqueue is already frozen by the PM core before this
         * function is called.
         */
        remove_common(vb);
        return 0;
}

static int virtballoon_restore(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;
        int ret;

        ret = init_vqs(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        if (towards_target(vb))
                virtballoon_changed(vdev);
        update_balloon_size(vb);
        return 0;
}
#endif

static unsigned int features[] = {
        VIRTIO_BALLOON_F_MUST_TELL_HOST,
        VIRTIO_BALLOON_F_STATS_VQ,
        VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
};

static struct virtio_driver virtio_balloon_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtballoon_probe,
        .remove = virtballoon_remove,
        .config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtballoon_freeze,
        .restore = virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");