rpms/kernel/F-11 linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch, NONE, 1.1 linux-2.6-mm-lru-evict-streaming-io-pages-first.patch, NONE, 1.1 kernel.spec, 1.1595, 1.1596
Kyle McMartin
kyle at fedoraproject.org
Fri May 8 14:45:32 UTC 2009
Author: kyle
Update of /cvs/pkgs/rpms/kernel/F-11
In directory cvs1.fedora.phx.redhat.com:/tmp/cvs-serv5696
Modified Files:
kernel.spec
Added Files:
linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch
linux-2.6-mm-lru-evict-streaming-io-pages-first.patch
Log Message:
* Fri May 08 2009 Kyle McMartin <kyle at redhat.com> 2.6.29.2-134
- linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch
linux-2.6-mm-lru-evict-streaming-io-pages-first.patch
Add VM patches from Rik to fix responsiveness; tested on the rawhide
kernel.
linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch:
--- NEW FILE linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch ---
From: Wu Fengguang <fengguang.wu at intel.com>
Subject: [PATCH -mm] vmscan: make mapped executable pages the first class citizen
Date: Thu, 7 May 2009 20:11:01 +0800
To: Andrew Morton <akpm at linux-foundation.org>
Introduce AS_EXEC to mark executables and their linked libraries, and to
protect their referenced active pages from being deactivated.
CC: Elladan <elladan at eskimo.com>
CC: Nick Piggin <npiggin at suse.de>
CC: Johannes Weiner <hannes at cmpxchg.org>
CC: Christoph Lameter <cl at linux-foundation.org>
CC: KOSAKI Motohiro <kosaki.motohiro at jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz at infradead.org>
Acked-by: Rik van Riel <riel at redhat.com>
Signed-off-by: Wu Fengguang <fengguang.wu at intel.com>
---
include/linux/pagemap.h | 1 +
mm/mmap.c | 2 ++
mm/nommu.c | 2 ++
mm/vmscan.c | 35 +++++++++++++++++++++++++++++++++--
4 files changed, 38 insertions(+), 2 deletions(-)
--- linux.orig/include/linux/pagemap.h
+++ linux/include/linux/pagemap.h
@@ -25,6 +25,7 @@ enum mapping_flags {
#ifdef CONFIG_UNEVICTABLE_LRU
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
#endif
+ AS_EXEC = __GFP_BITS_SHIFT + 4, /* mapped PROT_EXEC somewhere */
};
static inline void mapping_set_error(struct address_space *mapping, int error)
--- linux.orig/mm/mmap.c
+++ linux/mm/mmap.c
@@ -1194,6 +1194,8 @@ munmap_back:
goto unmap_and_free_vma;
if (vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
+ if (vm_flags & VM_EXEC)
+ set_bit(AS_EXEC, &file->f_mapping->flags);
} else if (vm_flags & VM_SHARED) {
error = shmem_zero_setup(vma);
if (error)
--- linux.orig/mm/nommu.c
+++ linux/mm/nommu.c
@@ -1224,6 +1224,8 @@ unsigned long do_mmap_pgoff(struct file
added_exe_file_vma(current->mm);
vma->vm_mm = current->mm;
}
+ if (vm_flags & VM_EXEC)
+ set_bit(AS_EXEC, &file->f_mapping->flags);
}
down_write(&nommu_region_sem);
--- linux.orig/mm/vmscan.c
+++ linux/mm/vmscan.c
@@ -1230,6 +1230,7 @@ static void shrink_active_list(unsigned
unsigned long pgmoved;
unsigned long pgscanned;
LIST_HEAD(l_hold); /* The pages which were snipped off */
+ LIST_HEAD(l_active);
LIST_HEAD(l_inactive);
struct page *page;
struct pagevec pvec;
@@ -1269,8 +1270,15 @@ static void shrink_active_list(unsigned
/* page_referenced clears PageReferenced */
if (page_mapping_inuse(page) &&
- page_referenced(page, 0, sc->mem_cgroup))
+ page_referenced(page, 0, sc->mem_cgroup)) {
+ struct address_space *mapping = page_mapping(page);
+
pgmoved++;
+ if (mapping && test_bit(AS_EXEC, &mapping->flags)) {
+ list_add(&page->lru, &l_active);
+ continue;
+ }
+ }
list_add(&page->lru, &l_inactive);
}
@@ -1279,7 +1287,6 @@ static void shrink_active_list(unsigned
* Move the pages to the [file or anon] inactive list.
*/
pagevec_init(&pvec, 1);
- lru = LRU_BASE + file * LRU_FILE;
spin_lock_irq(&zone->lru_lock);
/*
@@ -1291,6 +1298,7 @@ static void shrink_active_list(unsigned
reclaim_stat->recent_rotated[!!file] += pgmoved;
pgmoved = 0;
+ lru = LRU_BASE + file * LRU_FILE;
while (!list_empty(&l_inactive)) {
page = lru_to_page(&l_inactive);
prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1313,6 +1321,29 @@ static void shrink_active_list(unsigned
}
__count_zone_vm_events(PGREFILL, zone, pgscanned);
__count_vm_events(PGDEACTIVATE, pgdeactivate);
+
+ pgmoved = 0; /* count pages moved back to active list */
+ lru = LRU_ACTIVE + file * LRU_FILE;
+ while (!list_empty(&l_active)) {
+ page = lru_to_page(&l_active);
+ prefetchw_prev_lru_page(page, &l_active, flags);
+ VM_BUG_ON(PageLRU(page));
+ SetPageLRU(page);
+ VM_BUG_ON(!PageActive(page));
+
+ list_move(&page->lru, &zone->lru[lru].list);
+ mem_cgroup_add_lru_list(page, lru);
+ pgmoved++;
+ if (!pagevec_add(&pvec, page)) {
+ spin_unlock_irq(&zone->lru_lock);
+ if (buffer_heads_over_limit)
+ pagevec_strip(&pvec);
+ __pagevec_release(&pvec);
+ spin_lock_irq(&zone->lru_lock);
+ }
+ }
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+
spin_unlock_irq(&zone->lru_lock);
if (vm_swap_full())
pagevec_swap_free(&pvec);
linux-2.6-mm-lru-evict-streaming-io-pages-first.patch:
--- NEW FILE linux-2.6-mm-lru-evict-streaming-io-pages-first.patch ---
When the file LRU lists are dominated by streaming IO pages,
evict those pages first, before considering evicting other
pages.
This should be safe from deadlocks and should not cause performance
problems, because only three things can happen to an inactive file page:
1) referenced twice and promoted to the active list
2) evicted by the pageout code
3) under IO, after which it will get evicted or promoted
The pages freed in this way can either be reused for streaming
IO, or allocated for something else. If the pages are used for
streaming IO, this pageout pattern continues. Otherwise, we will
fall back to the normal pageout pattern.
Signed-off-by: Rik van Riel <riel at redhat.com>
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a9e3b76..dbfe7ba 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -94,6 +94,7 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
int priority);
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
struct zone *zone,
enum lru_list lru);
@@ -239,6 +240,12 @@ mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
return 1;
}
+static inline int
+mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+ return 1;
+}
+
static inline unsigned long
mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
enum lru_list lru)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e44fb0f..026cb5a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -578,6 +578,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
return 0;
}
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+ unsigned long active;
+ unsigned long inactive;
+
+ inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_FILE);
+ active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_FILE);
+
+ return (active > inactive);
+}
+
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
struct zone *zone,
enum lru_list lru)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eac9577..a73f675 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1348,12 +1348,48 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
return low;
}
+static int inactive_file_is_low_global(struct zone *zone)
+{
+ unsigned long active, inactive;
+
+ active = zone_page_state(zone, NR_ACTIVE_FILE);
+ inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+
+ return (active > inactive);
+}
+
+/**
+ * inactive_file_is_low - check if file pages need to be deactivated
+ * @zone: zone to check
+ * @sc: scan control of this context
+ *
+ * When the system is doing streaming IO, memory pressure here
+ * ensures that active file pages get deactivated, until more
+ * than half of the file pages are on the inactive list.
+ *
+ * Once we get to that situation, protect the system's working
+ * set from being evicted by disabling active file page aging.
+ *
+ * This uses a different ratio than the anonymous pages, because
+ * the page cache uses a use-once replacement algorithm.
+ */
+static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+{
+ int low;
+
+ if (scanning_global_lru(sc))
+ low = inactive_file_is_low_global(zone);
+ else
+ low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
+ return low;
+}
+
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct zone *zone, struct scan_control *sc, int priority)
{
int file = is_file_lru(lru);
- if (lru == LRU_ACTIVE_FILE) {
+ if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
shrink_active_list(nr_to_scan, zone, sc, priority, file);
return 0;
}
Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-11/kernel.spec,v
retrieving revision 1.1595
retrieving revision 1.1596
diff -u -p -r1.1595 -r1.1596
--- kernel.spec 7 May 2009 19:43:46 -0000 1.1595
+++ kernel.spec 8 May 2009 14:45:02 -0000 1.1596
@@ -596,6 +596,10 @@ Patch21: linux-2.6-tracehook.patch
Patch22: linux-2.6-utrace.patch
Patch23: linux-2.6-utrace-ftrace.patch
+# vm patches
+Patch25: linux-2.6-mm-lru-evict-streaming-io-pages-first.patch
+Patch26: linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch
+
# Support suspend/resume, other crash fixes
Patch30: linux-2.6-iommu-fixes.patch
@@ -1147,6 +1151,10 @@ ApplyPatch linux-2.6-tracehook.patch
ApplyPatch linux-2.6-utrace.patch
ApplyPatch linux-2.6-utrace-ftrace.patch
+# vm patches
+ApplyPatch linux-2.6-mm-lru-evict-streaming-io-pages-first.patch
+ApplyPatch linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch
+
# IOMMU fixes backported to 2.6.29
ApplyPatch linux-2.6-iommu-fixes.patch
@@ -1973,6 +1981,12 @@ fi
# and build.
%changelog
+* Fri May 08 2009 Kyle McMartin <kyle at redhat.com> 2.6.29.2-134
+- linux-2.6-mm-lru-dont-evict-mapped-executable-pages.patch
+ linux-2.6-mm-lru-evict-streaming-io-pages-first.patch
+ Add VM patches from Rik to fix responsiveness, tested on the rawhide
+ kernel.
+
* Thu May 07 2009 Adam Jackson <ajax at redhat.com>
- drm-intel-debugfs-ringbuffer.patch: Add debugfs support for
intel_gpu_dump utility.
More information about the scm-commits
mailing list