ksm: remove unswappable max_kernel_pages

Now that ksm pages are swappable, and the known holes plugged, remove
mention of unswappable kernel pages from KSM documentation and comments.

Remove the totalram_pages/4 initialization of max_kernel_pages.  In fact,
remove max_kernel_pages altogether - we can reinstate it if removal turns
out to break someone's script; but if we later want to limit KSM's memory
usage, limiting the stable nodes would not be an effective approach.
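
For scripts that still write the old tunable, its absence can simply be
skipped rather than treated as a failure.  A minimal userspace sketch, not
part of this commit: the sysfs path is the real knob being removed, while
the helper name and error policy are illustrative assumptions.

	#include <errno.h>
	#include <stdio.h>

	/* Write the KSM limit if the knob still exists; on kernels with
	 * this patch, fopen fails with ENOENT and we skip it quietly. */
	static int set_max_kernel_pages(unsigned long nr_pages)
	{
		FILE *f = fopen("/sys/kernel/mm/ksm/max_kernel_pages", "w");

		if (!f)
			return errno == ENOENT ? 0 : -1;
		fprintf(f, "%lu\n", nr_pages);
		return fclose(f) ? -1 : 0;
	}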

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Hugh Dickins
2009-12-14 17:59:34 -08:00
committed by Linus Torvalds
parent 62b61f611e
commit d0f209f68f
3 changed files with 10 additions and 55 deletions

mm/ksm.c

@@ -179,9 +179,6 @@ static unsigned long ksm_pages_unshared;
 /* The number of rmap_items in use: to calculate pages_volatile */
 static unsigned long ksm_rmap_items;
 
-/* Limit on the number of unswappable pages used */
-static unsigned long ksm_max_kernel_pages;
-
 /* Number of pages ksmd should scan in one batch */
 static unsigned int ksm_thread_pages_to_scan = 100;
 
@@ -943,14 +940,6 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
 {
 	int err;
 
-	/*
-	 * The number of nodes in the stable tree
-	 * is the number of kernel pages that we hold.
-	 */
-	if (ksm_max_kernel_pages &&
-	    ksm_max_kernel_pages <= ksm_pages_shared)
-		return NULL;
-
 	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
 	if (!err) {
 		err = try_to_merge_with_ksm_page(tree_rmap_item,
@@ -1850,8 +1839,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 	/*
 	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
 	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
-	 * breaking COW to free the unswappable pages_shared (but leaves
-	 * mm_slots on the list for when ksmd may be set running again).
+	 * breaking COW to free the pages_shared (but leaves mm_slots
+	 * on the list for when ksmd may be set running again).
 	 */
 
 	mutex_lock(&ksm_thread_mutex);
@@ -1876,29 +1865,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 KSM_ATTR(run);
 
-static ssize_t max_kernel_pages_store(struct kobject *kobj,
-				      struct kobj_attribute *attr,
-				      const char *buf, size_t count)
-{
-	int err;
-	unsigned long nr_pages;
-
-	err = strict_strtoul(buf, 10, &nr_pages);
-	if (err)
-		return -EINVAL;
-
-	ksm_max_kernel_pages = nr_pages;
-
-	return count;
-}
-
-static ssize_t max_kernel_pages_show(struct kobject *kobj,
-				     struct kobj_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
-}
-KSM_ATTR(max_kernel_pages);
-
 static ssize_t pages_shared_show(struct kobject *kobj,
 				 struct kobj_attribute *attr, char *buf)
 {
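
As an aside for anyone adapting similar sysfs handlers: the parsing helper
in the removed store function, strict_strtoul(), was later replaced in
mainline by kstrtoul() with the same signature.  A sketch of the equivalent
store logic in that style (the function name is hypothetical; only the
helper swap differs from the removed code):

	/* Sketch only, not part of this commit. */
	static ssize_t example_pages_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
	{
		unsigned long nr_pages;
		int err;

		err = kstrtoul(buf, 10, &nr_pages); /* modern strict_strtoul */
		if (err)
			return -EINVAL;

		/* act on nr_pages here */
		return count;
	}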
@@ -1948,7 +1914,6 @@ static struct attribute *ksm_attrs[] = {
 	&sleep_millisecs_attr.attr,
 	&pages_to_scan_attr.attr,
 	&run_attr.attr,
-	&max_kernel_pages_attr.attr,
 	&pages_shared_attr.attr,
 	&pages_sharing_attr.attr,
 	&pages_unshared_attr.attr,
@@ -1968,8 +1933,6 @@ static int __init ksm_init(void)
 	struct task_struct *ksm_thread;
 	int err;
 
-	ksm_max_kernel_pages = totalram_pages / 4;
-
 	err = ksm_slab_init();
 	if (err)
 		goto out;