vmw_balloon.c

/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by:	Xavier Deguillard <xdeguillard@vmware.com>
 *			Philip Moltmann <moltmann@vmware.com>
 */

/*
 * This is VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */
//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
 * Various constants controlling rate of inflating/deflating balloon,
 * measured in pages.
 */

/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U

/*
 * When guest is under memory pressure, use a reduced page allocation
 * rate for next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated with any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_SHIFT		(9)
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available.
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands.
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands.
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *	VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET	10

/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64   PAGE_SHIFT          6        0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};
static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				u64 pa)
{
	batch->pages[idx] = pa;
}
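
/*
 * Worked example (illustrative only, assuming PAGE_SHIFT == 12): a batch
 * entry of 0x0000000000abc002 decodes as
 *
 *	pa     = entry & VMW_BALLOON_BATCH_PAGE_MASK   = 0xabc000 (PPN 0xabc)
 *	status = entry & VMW_BALLOON_BATCH_STATUS_MASK = 0x2
 *		 (VMW_BALLOON_ERROR_PPN_INVALID)
 *
 * The driver stores page_to_pfn(p) << PAGE_SHIFT with zero status bits via
 * vmballoon_batch_set_pa(); after a batched command, the hypervisor's
 * per-page status is read back from the low bits of the same slot.
 */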
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S" (__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4" (arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})
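
/*
 * Illustrative use of the backdoor macro above (a sketch, not extra driver
 * code): retrieving the balloon target looks like
 *
 *	unsigned long target, dummy = 0;
 *	unsigned long status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy,
 *						  target);
 *
 * The "inl" on port VMW_BALLOON_HV_PORT traps to the hypervisor with the
 * magic in %eax and the command in %ecx; the status comes back in %eax and
 * the result in %ebx. START is special-cased because it returns the
 * capability word in %ecx instead.
 */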
#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;
	unsigned int doorbell;

	/* allocation statistics */
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

	/* monitor operations */
	unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
	unsigned int doorbell_set;
	unsigned int doorbell_unset;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
};

struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;
};

struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;

	struct vmci_handle vmci_doorbell;
};
static struct vmballoon balloon;

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;
	bool success;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		success = true;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		success = true;
		break;
	default:
		success = false;
	}

	if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
		b->supported_page_sizes = 2;
	else
		b->supported_page_sizes = 1;

	if (!success) {
		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.start_fail);
	}
	return success;
}

static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;

	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
		/* fall through */

	default:
		return false;
	}
}

/*
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}

static u16 vmballoon_page_size(bool is_2m_page)
{
	if (is_2m_page)
		return 1 << VMW_BALLOON_2M_SHIFT;

	return 1;
}
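
/*
 * Note (for orientation, not part of the protocol): with VMW_BALLOON_2M_SHIFT
 * of 9, one 2m page accounts for 1 << 9 == 512 small pages, which is why
 * b->size is kept in 4k-page units and adjusted by vmballoon_page_size()
 * everywhere a 2m page is added or removed.
 */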
/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}
/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some pages;
 * we need to check the return value and maybe submit a different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				unsigned int *hv_status, unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -1;

	STATS_INC(b->stats.lock[false]);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[false]);
	return 1;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.lock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[is_2m_pages]);
	return 1;
}

/*
 * Notify the host that the guest intends to release the given page back
 * into the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
						unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock[false]);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[false]);
	return false;
}

static bool vmballoon_send_batched_unlock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.unlock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[is_2m_pages]);
	return false;
}

static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
	if (is_2m_page)
		return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);

	return alloc_page(flags);
}

static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
	if (is_2m_page)
		__free_pages(page, VMW_BALLOON_2M_SHIFT);
	else
		__free_page(page);
}
/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned is_2m_pages;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			b->size -= size_per_page;
			cond_resched();
		}
	}

	if (b->batch_page) {
		vunmap(b->batch_page);
		b->batch_page = NULL;
	}

	if (b->page) {
		__free_page(b->page);
		b->page = NULL;
	}
}
/*
 * Notify the host of a ballooned page. If the host rejects the page, put it
 * on the list of refused pages; those refused pages are then released at
 * the end of the inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
				bool is_2m_pages, unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
								target);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc[false]);

		if (hv_status == VMW_BALLOON_ERROR_RESET ||
				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			vmballoon_free_page(page, false);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
		} else {
			vmballoon_free_page(page, false);
		}
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &page_size->pages);

	/* update balloon size */
	b->size++;

	return 0;
}
static int vmballoon_lock_batched_page(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	int locked, i;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
			target);
	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			vmballoon_free_page(p, is_2m_pages);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			vmballoon_free_page(p, is_2m_pages);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}
/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
		bool is_2m_pages, unsigned int *target)
{
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
		list_add(&page->lru, &page_size->pages);
		return -EIO;
	}

	/* deallocate page */
	vmballoon_free_page(page, false);
	STATS_INC(b->stats.free[false]);

	/* update balloon size */
	b->size--;

	return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
				unsigned int num_pages, bool is_2m_pages,
				unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
			target);
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &page_size->pages);
		} else {
			/* deallocate page */
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);

			/* update balloon size */
			b->size -= size_per_page;
		}
	}

	return ret;
}
/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
		bool is_2m_pages)
{
	struct page *page, *next;
	struct vmballoon_page_size *page_size =
			&b->page_sizes[is_2m_pages];

	list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
		list_del(&page->lru);
		vmballoon_free_page(page, is_2m_pages);
		STATS_INC(b->stats.refused_free[is_2m_pages]);
	}

	page_size->n_refused_pages = 0;
}

static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			(u64)page_to_pfn(p) << PAGE_SHIFT);
}
/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned rate;
	unsigned int allocations = 0;
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
	bool is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, the balloon driver can
	 * consume all available CPU cycles if too many pages are allocated
	 * in a second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slow down page allocations considerably.
	 */

	/*
	 * Start with the no-sleep allocation rate, which may be higher
	 * than the sleeping allocation rate.
	 */
	if (b->slow_allocation_cycles) {
		rate = b->rate_alloc;
		is_2m_pages = false;
	} else {
		rate = UINT_MAX;
		is_2m_pages =
			b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
	}

	pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
		 __func__, b->target - b->size, rate, b->rate_alloc);

	while (!b->reset_required &&
		b->size + num_pages * vmballoon_page_size(is_2m_pages)
		< b->target) {
		struct page *page;

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc[is_2m_pages]);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = vmballoon_alloc_page(flags, is_2m_pages);
		if (!page) {
			STATS_INC(b->stats.alloc_fail[is_2m_pages]);

			if (is_2m_pages) {
				b->ops->lock(b, num_pages, true, &b->target);

				/*
				 * ignore errors from locking as we now switch
				 * to 4k pages and we might get different
				 * errors.
				 */

				num_pages = 0;
				is_2m_pages = false;
				continue;
			}

			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						VMW_BALLOON_RATE_ALLOC_MIN);
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for next few cycles so that the guest
			 * gets out of memory pressure. Also, if we already
			 * allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

			if (allocations >= b->rate_alloc)
				break;

			flags = VMW_PAGE_ALLOC_CANSLEEP;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
			continue;
		}

		/*
		 * Count the successful allocation, so the rate checks below
		 * actually see progress. (The original listing never
		 * incremented this counter, leaving the rate limits inert.)
		 */
		allocations++;

		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages, is_2m_pages,
					&b->target);
			num_pages = 0;
			if (error)
				break;
		}

		cond_resched();

		if (allocations >= rate) {
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages, is_2m_pages, &b->target);

	/*
	 * We reached our goal without failures so try increasing
	 * allocation rate.
	 */
	if (error == 0 && allocations >= b->rate_alloc) {
		unsigned int mult = allocations / b->rate_alloc;

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

	vmballoon_release_refused_pages(b, true);
	vmballoon_release_refused_pages(b, false);
}
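
/*
 * Worked example of the rate adjustment above (numbers hypothetical): with
 * rate_alloc == 512 and a cycle that completed 2048 allocations without
 * error, mult == 2048 / 512 == 4, so the new rate is
 * min(512 + 4 * VMW_BALLOON_RATE_ALLOC_INC, VMW_BALLOON_RATE_ALLOC_MAX)
 * == min(512 + 64, 2048) == 576 pages. A failed CANSLEEP allocation instead
 * halves the rate, bounded below by VMW_BALLOON_RATE_ALLOC_MIN.
 */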
/*
 * Decrease the size of the balloon, allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	unsigned is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
			is_2m_pages++) {
		struct page *page, *next;
		unsigned int num_pages = 0;
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			if (b->reset_required ||
				(b->target > 0 &&
					b->size - num_pages
					* vmballoon_page_size(is_2m_pages)
				< b->target + vmballoon_page_size(true)))
				break;

			list_del(&page->lru);
			b->ops->add_page(b, num_pages++, page);

			if (num_pages == b->batch_max_pages) {
				int error;

				error = b->ops->unlock(b, num_pages,
						is_2m_pages, &b->target);
				num_pages = 0;
				if (error)
					return;
			}

			cond_resched();
		}

		if (num_pages > 0)
			b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
	}
}

static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};
static bool vmballoon_init_batching(struct vmballoon *b)
{
	b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
	if (!b->page)
		return false;

	b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
	if (!b->batch_page) {
		__free_page(b->page);
		return false;
	}

	return true;
}
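
/*
 * Design note: the same struct page does double duty for batching. Its pfn
 * is what vmballoon_send_batched_lock()/..._unlock() pass to the hypervisor,
 * while the vmap() above gives the driver a kernel virtual address
 * (b->batch_page) through which it fills in the u64 entries of that page.
 */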
/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	STATS_INC(b->stats.doorbell);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	int error;

	VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
			VMCI_INVALID_ID, error);
	STATS_INC(b->stats.doorbell_unset);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}
/*
 * Initialize the vmci doorbell, to get notified as soon as the balloon
 * target changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	int error = 0;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
		error = vmci_doorbell_create(&b->vmci_doorbell,
				VMCI_FLAG_DELAYED_CB,
				VMCI_PRIVILEGE_FLAG_RESTRICTED,
				vmballoon_doorbell, b);

		if (error == VMCI_SUCCESS) {
			VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
					b->vmci_doorbell.context,
					b->vmci_doorbell.resource, error);
			STATS_INC(b->stats.doorbell_set);
		}
	}

	if (error != 0) {
		vmballoon_vmci_cleanup(b);

		return -EIO;
	}

	return 0;
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}
/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat every second.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (b->slow_allocation_cycles > 0)
		b->slow_allocation_cycles--;

	if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (target == 0 ||
				b->size > target + vmballoon_page_size(true))
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}
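
/*
 * Timing note: this work item reschedules itself every second, so target
 * changes are normally picked up on that cadence. When the host signals the
 * VMCI doorbell, vmballoon_doorbell() calls mod_delayed_work(..., 0) to pull
 * the next iteration forward instead of waiting out the full interval.
 */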
/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
		   "used capabilities:      %#4lx\n"
		   "is resetting:           %c\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities,
		   b->reset_required ? 'y' : 'n');

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	/* format rate info */
	seq_printf(f,
		   "rateSleepAlloc:     %8d pages/sec\n",
		   b->rate_alloc);

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "doorbell:           %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "2m-lock:            %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "2m-unlock:          %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "prim2mAlloc:        %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "prim2mFree:         %8u\n"
		   "primFree:           %8u\n"
		   "err2mAlloc:         %8u\n"
		   "errAlloc:           %8u\n"
		   "err2mFree:          %8u\n"
		   "errFree:            %8u\n"
		   "doorbellSet:        %8u\n"
		   "doorbellUnset:      %8u\n",
		   stats->timer,
		   stats->doorbell,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock[true], stats->lock_fail[true],
		   stats->lock[false], stats->lock_fail[false],
		   stats->unlock[true], stats->unlock_fail[true],
		   stats->unlock[false], stats->unlock_fail[false],
		   stats->target, stats->target_fail,
		   stats->alloc[true], stats->alloc_fail[true],
		   stats->alloc[false], stats->alloc_fail[false],
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free[true],
		   stats->free[false],
		   stats->refused_alloc[true], stats->refused_alloc[false],
		   stats->refused_free[true], stats->refused_free[false],
		   stats->doorbell_set, stats->doorbell_unset);

	return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */
static int __init vmballoon_init(void)
{
	int error;
	unsigned is_2m_pages;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
	}

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
module_init(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);