author Dmitry Torokhov <dtor@vmware.com> 2011-01-12 17:01:07 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2011-01-13 08:03:22 -0800
commit d27a0c06ec31aadc3582f50cd7b88855922e95ae (patch)
tree 973b581f14c05e5de869468d5771ffcdba4fe063
parent 17fecb5582962c2ca5627a51ec9ab0979fb673ef (diff)
VMware balloon: stop locking pages when hypervisor tells us enough
When the hypervisor decides to decrease the target balloon size while the balloon driver is trying to lock pages, it may respond with VMW_BALLOON_ERROR_PPN_NOTNEEDED. Use this status to stop reserving pages immediately and wait for the next update cycle to fetch the new target, instead of continuing to lock pages until the refused list grows above VMW_BALLOON_MAX_REFUSED (16) pages. As a result the driver stops bothering the hypervisor with attempts to lock pages that are no longer needed; most likely the hypervisor's next order will be to reduce the balloon size anyway. It is a small optimization.

Signed-off-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
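For orientation before the diff, here is a minimal sketch (not driver source) of the decision vmballoon_reserve_page() can make once the hypervisor status from the LOCK command is propagated out of vmballoon_send_lock_page(). The constant names and numeric values are illustrative placeholders, not the driver's actual definitions.

#include <errno.h>

/*
 * Sketch only: the new path returns -EIO as soon as the host signals
 * RESET or PPN_NOTNEEDED, while the old path kept retrying until the
 * refused-page counter reached its limit.
 */
#define SKETCH_ERROR_RESET          1   /* hypothetical value */
#define SKETCH_ERROR_PPN_NOTNEEDED  2   /* hypothetical value */
#define SKETCH_MAX_REFUSED          16  /* mirrors VMW_BALLOON_MAX_REFUSED */

static int lock_refused_sketch(unsigned int hv_status, unsigned int *n_refused)
{
	/* New behaviour: give up at once when the host says stop. */
	if (hv_status == SKETCH_ERROR_RESET ||
	    hv_status == SKETCH_ERROR_PPN_NOTNEEDED)
		return -EIO;

	/* Old behaviour: only give up after 16 refused pages pile up. */
	if (++(*n_refused) >= SKETCH_MAX_REFUSED)
		return -EIO;

	return 0;	/* try again with a different page */
}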
-rw-r--r-- drivers/misc/vmw_balloon.c | 13
1 file changed, 8 insertions, 5 deletions
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804a71a..4d2ea8e8014 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.1-k");
+MODULE_VERSION("1.2.1.2-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -315,7 +315,8 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
* fear that guest will need it. Host may reject some pages, we need to
* check the return value and maybe submit a different page.
*/
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+ unsigned int *hv_status)
{
unsigned long status, dummy;
u32 pfn32;
@@ -326,7 +327,7 @@ static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
STATS_INC(b->stats.lock);
- status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+ *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
if (vmballoon_check_status(b, status))
return true;
@@ -410,6 +411,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
{
struct page *page;
gfp_t flags;
+ unsigned int hv_status;
bool locked = false;
do {
@@ -429,11 +431,12 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
}
/* inform monitor */
- locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
if (!locked) {
STATS_INC(b->stats.refused_alloc);
- if (b->reset_required) {
+ if (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
__free_page(page);
return -EIO;
}
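For readers following the call chain, the -EIO return above is what lets the caller abandon the current inflate pass early. A hedged sketch of how such a return might be consumed; the caller name and loop structure here are assumptions for illustration, not taken from this patch.

#include <errno.h>

/*
 * Hypothetical consumer of the -EIO return: stop the current inflate
 * pass as soon as page reservation reports that the host no longer
 * wants pages, instead of looping up to the full target.
 */
static void inflate_sketch(int (*reserve_page)(void), unsigned int goal)
{
	unsigned int i;

	for (i = 0; i < goal; i++) {
		if (reserve_page() == -EIO)
			break;	/* wait for the next target update cycle */
	}
}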