openwrt-xburst/target/linux/goldfish/patches-2.6.30/0055-mm-Add-min_free_order_shift-tunable.patch
juhosg 61a476da4d kernel: upgrade to 2.6.30.8 and refresh patches
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@17804 3c298f89-4303-0410-b956-a3cf2f4a3e73
2009-10-01 10:07:32 +00:00

From d620f695290e4ffb1586420ba1dbbb5b2c8c075d Mon Sep 17 00:00:00 2001
From: =?utf-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com>
Date: Tue, 17 Feb 2009 14:51:02 -0800
Subject: [PATCH 055/134] mm: Add min_free_order_shift tunable.
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit

By default the kernel tries to keep half as much memory free at each
order as it does for one order below. This can be too aggressive when
running without swap.

Signed-off-by: Arve Hjønnevåg <arve@android.com>
---
 kernel/sysctl.c | 9 +++++++++
 mm/page_alloc.c | 3 ++-
 2 files changed, 11 insertions(+), 1 deletions(-)
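
To make the behaviour described above concrete: the check this patch modifies
lives in zone_watermark_ok() in mm/page_alloc.c. The stand-alone sketch below
is a simplified model of that per-order watermark walk, not the kernel's exact
code, and its function and parameter names are invented for illustration only.

/*
 * Simplified model of the per-order watermark walk.  "min" is the base
 * watermark and nr_free[o] the number of free blocks of order o.  At each
 * order the pages that are too small to satisfy the request are subtracted
 * and the requirement is reduced by min_free_order_shift.
 */
static int watermark_ok_sketch(long free_pages, long min,
                               const unsigned long *nr_free, int order,
                               int min_free_order_shift)
{
        int o;

        if (free_pages <= min)
                return 0;

        for (o = 0; o < order; o++) {
                /* Blocks of this order cannot satisfy the request. */
                free_pages -= nr_free[o] << o;

                /* Require fewer higher order pages to be free. */
                min >>= min_free_order_shift;

                if (free_pages <= min)
                        return 0;
        }
        return 1;
}

With the default shift of 1 the requirement is halved at every order, exactly
what the old hard-coded "min >>= 1" did; a larger shift makes the requirement
fall off faster, which is the relaxation intended for swapless systems.
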
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -77,6 +77,7 @@ extern int suid_dumpable;
 extern char core_pattern[];
 extern int pid_max;
 extern int min_free_kbytes;
+extern int min_free_order_shift;
 extern int pid_max_min, pid_max_max;
 extern int sysctl_drop_caches;
 extern int percpu_pagelist_fraction;
@@ -1138,6 +1139,14 @@ static struct ctl_table vm_table[] = {
                 .extra1 = &zero,
         },
         {
+                .ctl_name = CTL_UNNUMBERED,
+                .procname = "min_free_order_shift",
+                .data = &min_free_order_shift,
+                .maxlen = sizeof(min_free_order_shift),
+                .mode = 0644,
+                .proc_handler = &proc_dointvec
+        },
+        {
                 .ctl_name = VM_PERCPU_PAGELIST_FRACTION,
                 .procname = "percpu_pagelist_fraction",
                 .data = &percpu_pagelist_fraction,
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -119,6 +119,7 @@ static char * const zone_names[MAX_NR_ZO
 };
 
 int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
 
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
@@ -1258,7 +1259,7 @@ int zone_watermark_ok(struct zone *z, in
                 free_pages -= z->free_area[o].nr_free << o;
 
                 /* Require fewer higher order pages to be free */
-                min >>= 1;
+                min >>= min_free_order_shift;
 
                 if (free_pages <= min)
                         return 0;
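
Once applied, the tunable is exposed as /proc/sys/vm/min_free_order_shift
(also reachable as sysctl vm.min_free_order_shift): an integer handled by
proc_dointvec and writable by root thanks to the 0644 mode in the vm_table
entry above. The default of 1 keeps the historical behaviour; writing a larger
value, for example echo 2 > /proc/sys/vm/min_free_order_shift, lets the
free-memory requirement drop off more quickly at higher allocation orders,
addressing the over-aggressive behaviour the commit message describes for
systems running without swap.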