rpms/kernel/F-7 kernel-2.6.spec, 1.3313, 1.3314 linux-2.6-sched-cfs.patch, 1.10, 1.11
Chuck Ebbert (cebbert)
fedora-extras-commits at redhat.com
Tue Aug 14 18:42:59 UTC 2007
Author: cebbert
Update of /cvs/pkgs/rpms/kernel/F-7
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv6392
Modified Files:
kernel-2.6.spec linux-2.6-sched-cfs.patch
Log Message:
* Tue Aug 14 2007 Chuck Ebbert <cebbert at redhat.com>
- update CFS scheduler with upstream patches
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/kernel-2.6.spec,v
retrieving revision 1.3313
retrieving revision 1.3314
diff -u -r1.3313 -r1.3314
--- kernel-2.6.spec 14 Aug 2007 17:52:31 -0000 1.3313
+++ kernel-2.6.spec 14 Aug 2007 18:42:26 -0000 1.3314
@@ -2275,6 +2275,9 @@
%changelog
* Tue Aug 14 2007 Chuck Ebbert <cebbert at redhat.com>
+- update CFS scheduler with upstream patches
+
+* Tue Aug 14 2007 Chuck Ebbert <cebbert at redhat.com>
- set CONFIG_NET_RADIO (#251094)
* Fri Aug 10 2007 Chuck Ebbert <cebbert at redhat.com>
linux-2.6-sched-cfs.patch:
Index: linux-2.6-sched-cfs.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/linux-2.6-sched-cfs.patch,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -r1.10 -r1.11
--- linux-2.6-sched-cfs.patch 10 Aug 2007 17:27:08 -0000 1.10
+++ linux-2.6-sched-cfs.patch 14 Aug 2007 18:42:26 -0000 1.11
@@ -51,6 +51,18 @@
[mingo/f1a438d813d416fa9f4be4e6dbd10b54c5938d89][merged inline]
sched: reorder update_cpu_load(rq) with the ->task_tick() call
+[018a2212950457b1093e504cd834aa0fe749da6c][merged inline]
+remove unused load_balance_class
+
+[529c77261bccd9d37f110f58b0753d95beaa9fa2]
+improve rq-clock overflow logic
+
+[de0cf899bbf06b6f64a5dce9c59d74c41b6b4232]
+sched: run_rebalance_domains: s/SCHED_IDLE/CPU_IDLE/
+
+[5d2b3d3695a841231b65b5536a70dc29961c5611]
+sched: fix sleeper bonus
+
Index: linux/Documentation/kernel-parameters.txt
===================================================================
@@ -1698,7 +1710,7 @@
/*
* This is part of a global counter where only the total sum
-@@ -260,14 +210,18 @@ struct rq {
+@@ -260,14 +210,16 @@ struct rq {
*/
unsigned long nr_uninterruptible;
@@ -1717,8 +1729,6 @@
+ unsigned int clock_warps, clock_overflows;
+ unsigned int clock_unstable_events;
+
-+ struct sched_class *load_balance_class;
-+
atomic_t nr_iowait;
#ifdef CONFIG_SMP
@@ -8333,3 +8343,159 @@
}
static inline void
+Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=529c77261bccd9d37f110f58b0753d95beaa9fa2
+Commit: 529c77261bccd9d37f110f58b0753d95beaa9fa2
+Parent: ac07860264bd2b18834d3fa3be47032115524cea
+Author: Ingo Molnar <mingo at elte.hu>
+AuthorDate: Fri Aug 10 23:05:11 2007 +0200
+Committer: Ingo Molnar <mingo at elte.hu>
+CommitDate: Fri Aug 10 23:05:11 2007 +0200
+
+ sched: improve rq-clock overflow logic
+
+ improve the rq-clock overflow logic: limit the absolute rq->clock
+ delta since the last scheduler tick, instead of limiting the delta
+ itself.
+
+ tested by Arjan van de Ven - whole laptop was misbehaving due to
+ an incorrectly calibrated cpu_khz confusing sched_clock().
+
+ Signed-off-by: Ingo Molnar <mingo at elte.hu>
+ Signed-off-by: Arjan van de Ven <arjan at linux.intel.com>
+---
+ kernel/sched.c | 15 +++++++++++++--
+ 1 files changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index b0afd8d..6247e4a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -263,6 +263,7 @@ struct rq {
+
+ unsigned int clock_warps, clock_overflows;
+ unsigned int clock_unstable_events;
++ u64 tick_timestamp;
+
+ atomic_t nr_iowait;
+
+@@ -341,8 +342,11 @@ static void __update_rq_clock(struct rq *rq)
+ /*
+ * Catch too large forward jumps too:
+ */
+- if (unlikely(delta > 2*TICK_NSEC)) {
+- clock++;
++ if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
++ if (clock < rq->tick_timestamp + TICK_NSEC)
++ clock = rq->tick_timestamp + TICK_NSEC;
++ else
++ clock++;
+ rq->clock_overflows++;
+ } else {
+ if (unlikely(delta > rq->clock_max_delta))
+@@ -3308,8 +3312,15 @@ void scheduler_tick(void)
+ int cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+ struct task_struct *curr = rq->curr;
++ u64 next_tick = rq->tick_timestamp + TICK_NSEC;
+
+ spin_lock(&rq->lock);
++ /*
++ * Let rq->clock advance by at least TICK_NSEC:
++ */
++ if (unlikely(rq->clock < next_tick))
++ rq->clock = next_tick;
++ rq->tick_timestamp = rq->clock;
+ update_cpu_load(rq);
+ if (curr != rq->idle) /* FIXME: needed? */
+ curr->sched_class->task_tick(rq, curr);
+Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=de0cf899bbf06b6f64a5dce9c59d74c41b6b4232
+Commit: de0cf899bbf06b6f64a5dce9c59d74c41b6b4232
+Parent: 5d2b3d3695a841231b65b5536a70dc29961c5611
+Author: Oleg Nesterov <oleg at tv-sign.ru>
+AuthorDate: Sun Aug 12 18:08:19 2007 +0200
+Committer: Ingo Molnar <mingo at elte.hu>
+CommitDate: Sun Aug 12 18:08:19 2007 +0200
+
+ sched: run_rebalance_domains: s/SCHED_IDLE/CPU_IDLE/
+
+ rebalance_domains(SCHED_IDLE) looks strange (typo), change it to CPU_IDLE.
+
+    the effect of this bug was slightly more aggressive idle-balancing on
+ SMP than intended.
+
+ Signed-off-by: Oleg Nesterov <oleg at tv-sign.ru>
+ Signed-off-by: Ingo Molnar <mingo at elte.hu>
+---
+ kernel/sched.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index c02659f..45e17b8 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3106,7 +3106,7 @@ static void run_rebalance_domains(struct softirq_action *h)
+ if (need_resched())
+ break;
+
+- rebalance_domains(balance_cpu, SCHED_IDLE);
++ rebalance_domains(balance_cpu, CPU_IDLE);
+
+ rq = cpu_rq(balance_cpu);
+ if (time_after(this_rq->next_balance, rq->next_balance))
+Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=5d2b3d3695a841231b65b5536a70dc29961c5611
+Commit: 5d2b3d3695a841231b65b5536a70dc29961c5611
+Parent: 6707de00fdec3e3225192fe3dcd21323a8936b1f
+Author: Ingo Molnar <mingo at elte.hu>
+AuthorDate: Sun Aug 12 18:08:19 2007 +0200
+Committer: Ingo Molnar <mingo at elte.hu>
+CommitDate: Sun Aug 12 18:08:19 2007 +0200
+
+ sched: fix sleeper bonus
+
+    Peter Zijlstra noticed that the sleeper bonus deduction code
+ was not properly rate-limited: a task that scheduled more
+ frequently would get a disproportionately large deduction.
+ So limit the deduction to delta_exec.
+
+ Signed-off-by: Ingo Molnar <mingo at elte.hu>
+---
+ kernel/sched_fair.c | 12 ++++++------
+ 1 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index c5af389..fedbb51 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -75,7 +75,7 @@ enum {
+
+ unsigned int sysctl_sched_features __read_mostly =
+ SCHED_FEAT_FAIR_SLEEPERS *1 |
+- SCHED_FEAT_SLEEPER_AVG *1 |
++ SCHED_FEAT_SLEEPER_AVG *0 |
+ SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
+ SCHED_FEAT_PRECISE_CPU_LOAD *1 |
+ SCHED_FEAT_START_DEBIT *1 |
+@@ -304,11 +304,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
+
+ if (cfs_rq->sleeper_bonus > sysctl_sched_stat_granularity) {
+- delta = calc_delta_mine(cfs_rq->sleeper_bonus,
+- curr->load.weight, lw);
+- if (unlikely(delta > cfs_rq->sleeper_bonus))
+- delta = cfs_rq->sleeper_bonus;
+-
++ delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
++ delta = calc_delta_mine(delta, curr->load.weight, lw);
++ delta = min((u64)delta, cfs_rq->sleeper_bonus);
+ cfs_rq->sleeper_bonus -= delta;
+ delta_mine -= delta;
+ }
+@@ -521,6 +519,8 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ * Track the amount of bonus we've given to sleepers:
+ */
+ cfs_rq->sleeper_bonus += delta_fair;
++ if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
++ cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
+
+ schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+ }
More information about the scm-commits
mailing list