rpms/kernel/devel patch-2.6.22-rc6.bz2.sign, NONE, 1.1 .cvsignore, 1.635, 1.636 kernel-2.6.spec, 1.3239, 1.3240 linux-2.6-sched-cfs.patch, 1.7, 1.8 sources, 1.600, 1.601 upstream, 1.522, 1.523 patch-2.6.22-rc5-git8.bz2.sign, 1.1, NONE patch-2.6.22-rc5.bz2.sign, 1.1, NONE

Chuck Ebbert (cebbert) fedora-extras-commits at redhat.com
Mon Jun 25 23:53:35 UTC 2007


Author: cebbert

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv31901

Modified Files:
	.cvsignore kernel-2.6.spec linux-2.6-sched-cfs.patch sources 
	upstream 
Added Files:
	patch-2.6.22-rc6.bz2.sign 
Removed Files:
	patch-2.6.22-rc5-git8.bz2.sign patch-2.6.22-rc5.bz2.sign 
Log Message:
* Mon Jun 25 2007 Chuck Ebbert <cebbert at redhat.com>
- 2.6.22-rc6
- cfs update for -rc6



--- NEW FILE patch-2.6.22-rc6.bz2.sign ---
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
Comment: See http://www.kernel.org/signature.html for info

iD8DBQBGf2CAyGugalF9Dw4RAjBZAJ0QPA+N0WpVY69HDn6/o2KVV2wD3gCfTHMI
iGQZ7GA8pttWEP6e0nO7HTk=
=+w2v
-----END PGP SIGNATURE-----
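
The .sign file is a detached GnuPG signature made over the *uncompressed*
patch, per the kernel.org signing policy referenced in the Comment header
above. A minimal verification sketch, assuming the kernel.org signing key
has already been imported into the local keyring:

    # verify the bzip2-compressed patch against its detached signature;
    # gpg reads the signed data from stdin ("-")
    bzcat patch-2.6.22-rc6.bz2 | gpg --verify patch-2.6.22-rc6.bz2.sign -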


Index: .cvsignore
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/.cvsignore,v
retrieving revision 1.635
retrieving revision 1.636
diff -u -r1.635 -r1.636
--- .cvsignore	24 Jun 2007 03:42:11 -0000	1.635
+++ .cvsignore	25 Jun 2007 23:52:56 -0000	1.636
@@ -3,5 +3,4 @@
 temp-*
 kernel-2.6.21
 linux-2.6.21.tar.bz2
-patch-2.6.22-rc5.bz2
-patch-2.6.22-rc5-git8.bz2
+patch-2.6.22-rc6.bz2


Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel-2.6.spec,v
retrieving revision 1.3239
retrieving revision 1.3240
diff -u -r1.3239 -r1.3240
--- kernel-2.6.spec	25 Jun 2007 14:31:13 -0000	1.3239
+++ kernel-2.6.spec	25 Jun 2007 23:52:57 -0000	1.3240
@@ -413,8 +413,7 @@
 ### BRANCH PATCH ###
 %else
 # Here should be only the patches up to the upstream canonical Linus tree.
-Patch00: patch-2.6.22-rc5.bz2
-Patch01: patch-2.6.22-rc5-git8.bz2
+Patch00: patch-2.6.22-rc6.bz2
 %endif
 
 %if !%{nopatches}
@@ -907,8 +906,7 @@
 %else
 
 # Update to latest upstream.
-ApplyPatch patch-2.6.22-rc5.bz2
-ApplyPatch patch-2.6.22-rc5-git8.bz2
+ApplyPatch patch-2.6.22-rc6.bz2
 
 %endif
 %if !%{nopatches}
@@ -2090,6 +2088,10 @@
 %endif
 
 %changelog
+* Mon Jun 25 2007 Chuck Ebbert <cebbert at redhat.com>
+- 2.6.22-rc6
+- cfs update for -rc6
+
 * Mon Jun 25 2007 John W. Linville <linville at redhat.com>
 - Re-enable wireless-dev patch (updated for current kernel)
 

linux-2.6-sched-cfs.patch:

Index: linux-2.6-sched-cfs.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-sched-cfs.patch,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -r1.7 -r1.8
--- linux-2.6-sched-cfs.patch	25 Jun 2007 03:00:46 -0000	1.7
+++ linux-2.6-sched-cfs.patch	25 Jun 2007 23:52:57 -0000	1.8
@@ -1,7 +1,39 @@
-Index: linux-cfs-2.6.22-git.q/Documentation/kernel-parameters.txt
+---
+ Documentation/kernel-parameters.txt |   43 
+ Documentation/sched-design-CFS.txt  |  119 +
+ Makefile                            |    2 
+ arch/i386/kernel/smpboot.c          |   12 
+ arch/i386/kernel/tsc.c              |    9 
+ arch/ia64/kernel/setup.c            |    6 
+ arch/mips/kernel/smp.c              |   11 
+ arch/sparc/kernel/smp.c             |   10 
+ arch/sparc64/kernel/smp.c           |   27 
+ block/cfq-iosched.c                 |    3 
+ fs/proc/array.c                     |   61 
+ fs/proc/base.c                      |   64 
+ include/asm-generic/bitops/sched.h  |   21 
+ include/linux/hardirq.h             |   13 
+ include/linux/sched.h               |  159 +-
+ include/linux/topology.h            |   14 
+ init/main.c                         |    5 
+ kernel/delayacct.c                  |   10 
+ kernel/exit.c                       |    3 
+ kernel/fork.c                       |    5 
+ kernel/posix-cpu-timers.c           |   34 
+ kernel/sched.c                      | 2843 ++++++++++++------------------------
+ kernel/sched_debug.c                |  260 +++
+ kernel/sched_fair.c                 |  884 +++++++++++
+ kernel/sched_idletask.c             |   68 
+ kernel/sched_rt.c                   |  215 ++
+ kernel/sched_stats.h                |  235 ++
+ kernel/softirq.c                    |    1 
+ kernel/sysctl.c                     |   76 
+ 29 files changed, 3165 insertions(+), 2048 deletions(-)
+
+Index: linux/Documentation/kernel-parameters.txt
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/Documentation/kernel-parameters.txt
-+++ linux-cfs-2.6.22-git.q/Documentation/kernel-parameters.txt
+--- linux.orig/Documentation/kernel-parameters.txt
++++ linux/Documentation/kernel-parameters.txt
 @@ -1019,49 +1019,6 @@ and is between 256 and 4096 characters. 
  
  	mga=		[HW,DRM]
@@ -52,10 +84,10 @@
  	mousedev.tap_time=
  			[MOUSE] Maximum time between finger touching and
  			leaving touchpad surface for touch to be considered
-Index: linux-cfs-2.6.22-git.q/Documentation/sched-design-CFS.txt
+Index: linux/Documentation/sched-design-CFS.txt
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-git.q/Documentation/sched-design-CFS.txt
++++ linux/Documentation/sched-design-CFS.txt
 @@ -0,0 +1,119 @@
 +
 +This is the CFS scheduler.
@@ -176,10 +208,23 @@
 +   iterators of the scheduling modules are used. The balancing code got
 +   quite a bit simpler as a result.
 +
-Index: linux-cfs-2.6.22-git.q/arch/i386/kernel/smpboot.c
+Index: linux/Makefile
+===================================================================
+--- linux.orig/Makefile
++++ linux/Makefile
+@@ -1,7 +1,7 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 22
+-EXTRAVERSION = -rc6
++EXTRAVERSION = -rc6-cfs-v18
+ NAME = Holy Dancing Manatees, Batman!
+ 
+ # *DOCUMENTATION*
+Index: linux/arch/i386/kernel/smpboot.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/arch/i386/kernel/smpboot.c
-+++ linux-cfs-2.6.22-git.q/arch/i386/kernel/smpboot.c
+--- linux.orig/arch/i386/kernel/smpboot.c
++++ linux/arch/i386/kernel/smpboot.c
 @@ -941,17 +941,6 @@ exit:
  }
  #endif
@@ -206,10 +251,10 @@
  
  	set_cpu_sibling_map(0);
  
-Index: linux-cfs-2.6.22-git.q/arch/i386/kernel/tsc.c
+Index: linux/arch/i386/kernel/tsc.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/arch/i386/kernel/tsc.c
-+++ linux-cfs-2.6.22-git.q/arch/i386/kernel/tsc.c
+--- linux.orig/arch/i386/kernel/tsc.c
++++ linux/arch/i386/kernel/tsc.c
 @@ -4,6 +4,7 @@
   * See comments there for proper credits.
   */
@@ -241,10 +286,10 @@
  	if (!tsc_unstable) {
  		tsc_unstable = 1;
  		tsc_enabled = 0;
-Index: linux-cfs-2.6.22-git.q/arch/ia64/kernel/setup.c
+Index: linux/arch/ia64/kernel/setup.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/arch/ia64/kernel/setup.c
-+++ linux-cfs-2.6.22-git.q/arch/ia64/kernel/setup.c
+--- linux.orig/arch/ia64/kernel/setup.c
++++ linux/arch/ia64/kernel/setup.c
 @@ -805,7 +805,6 @@ static void __cpuinit
  get_max_cacheline_size (void)
  {
@@ -272,10 +317,10 @@
  	if (max > ia64_max_cacheline_size)
  		ia64_max_cacheline_size = max;
  }
-Index: linux-cfs-2.6.22-git.q/arch/mips/kernel/smp.c
+Index: linux/arch/mips/kernel/smp.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/arch/mips/kernel/smp.c
-+++ linux-cfs-2.6.22-git.q/arch/mips/kernel/smp.c
+--- linux.orig/arch/mips/kernel/smp.c
++++ linux/arch/mips/kernel/smp.c
 @@ -51,16 +51,6 @@ int __cpu_logical_map[NR_CPUS];		/* Map 
  EXPORT_SYMBOL(phys_cpu_present_map);
  EXPORT_SYMBOL(cpu_online_map);
@@ -301,10 +346,10 @@
  	plat_prepare_cpus(max_cpus);
  #ifndef CONFIG_HOTPLUG_CPU
  	cpu_present_map = cpu_possible_map;
-Index: linux-cfs-2.6.22-git.q/arch/sparc/kernel/smp.c
+Index: linux/arch/sparc/kernel/smp.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/arch/sparc/kernel/smp.c
-+++ linux-cfs-2.6.22-git.q/arch/sparc/kernel/smp.c
+--- linux.orig/arch/sparc/kernel/smp.c
++++ linux/arch/sparc/kernel/smp.c
 @@ -68,16 +68,6 @@ void __cpuinit smp_store_cpu_info(int id
  	cpu_data(id).prom_node = cpu_node;
  	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
@@ -322,10 +367,10 @@
  	if (cpu_data(id).mid < 0)
  		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
  }
-Index: linux-cfs-2.6.22-git.q/arch/sparc64/kernel/smp.c
+Index: linux/arch/sparc64/kernel/smp.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/arch/sparc64/kernel/smp.c
-+++ linux-cfs-2.6.22-git.q/arch/sparc64/kernel/smp.c
+--- linux.orig/arch/sparc64/kernel/smp.c
++++ linux/arch/sparc64/kernel/smp.c
 @@ -1163,32 +1163,6 @@ int setup_profiling_timer(unsigned int m
  	return -EINVAL;
  }
@@ -367,10 +412,10 @@
  }
  
  void __devinit smp_prepare_boot_cpu(void)
-Index: linux-cfs-2.6.22-git.q/block/cfq-iosched.c
+Index: linux/block/cfq-iosched.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/block/cfq-iosched.c
-+++ linux-cfs-2.6.22-git.q/block/cfq-iosched.c
+--- linux.orig/block/cfq-iosched.c
++++ linux/block/cfq-iosched.c
 @@ -1278,6 +1278,8 @@ static void cfq_init_prio_data(struct cf
  			/*
  			 * no prio set, place us in the middle of the BE classes
@@ -388,10 +433,10 @@
  			cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
  			cfqq->ioprio = 7;
  			cfq_clear_cfqq_idle_window(cfqq);
-Index: linux-cfs-2.6.22-git.q/fs/proc/array.c
+Index: linux/fs/proc/array.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/fs/proc/array.c
-+++ linux-cfs-2.6.22-git.q/fs/proc/array.c
+--- linux.orig/fs/proc/array.c
++++ linux/fs/proc/array.c
 @@ -165,7 +165,6 @@ static inline char * task_state(struct t
  	rcu_read_lock();
  	buffer += sprintf(buffer,
@@ -514,10 +559,10 @@
  		cputime_to_clock_t(cutime),
  		cputime_to_clock_t(cstime),
  		priority,
-Index: linux-cfs-2.6.22-git.q/fs/proc/base.c
+Index: linux/fs/proc/base.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/fs/proc/base.c
-+++ linux-cfs-2.6.22-git.q/fs/proc/base.c
+--- linux.orig/fs/proc/base.c
++++ linux/fs/proc/base.c
 @@ -296,7 +296,7 @@ static int proc_pid_wchan(struct task_st
   */
  static int proc_pid_schedstat(struct task_struct *task, char *buffer)
@@ -610,10 +655,10 @@
  	INF("cmdline",   S_IRUGO, pid_cmdline),
  	INF("stat",      S_IRUGO, tid_stat),
  	INF("statm",     S_IRUGO, pid_statm),
-Index: linux-cfs-2.6.22-git.q/include/asm-generic/bitops/sched.h
+Index: linux/include/asm-generic/bitops/sched.h
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/include/asm-generic/bitops/sched.h
-+++ linux-cfs-2.6.22-git.q/include/asm-generic/bitops/sched.h
+--- linux.orig/include/asm-generic/bitops/sched.h
++++ linux/include/asm-generic/bitops/sched.h
 @@ -6,28 +6,23 @@
  
  /*
@@ -651,10 +696,10 @@
  #else
  #error BITS_PER_LONG not defined
  #endif
-Index: linux-cfs-2.6.22-git.q/include/linux/hardirq.h
+Index: linux/include/linux/hardirq.h
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/include/linux/hardirq.h
-+++ linux-cfs-2.6.22-git.q/include/linux/hardirq.h
+--- linux.orig/include/linux/hardirq.h
++++ linux/include/linux/hardirq.h
 @@ -79,6 +79,19 @@
  #endif
  
@@ -675,10 +720,10 @@
  # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
  # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
  #else
-Index: linux-cfs-2.6.22-git.q/include/linux/sched.h
+Index: linux/include/linux/sched.h
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/include/linux/sched.h
-+++ linux-cfs-2.6.22-git.q/include/linux/sched.h
+--- linux.orig/include/linux/sched.h
++++ linux/include/linux/sched.h
 @@ -2,7 +2,6 @@
  #define _LINUX_SCHED_H
  
@@ -991,10 +1036,10 @@
  static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
  {
  }
-Index: linux-cfs-2.6.22-git.q/include/linux/topology.h
+Index: linux/include/linux/topology.h
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/include/linux/topology.h
-+++ linux-cfs-2.6.22-git.q/include/linux/topology.h
+--- linux.orig/include/linux/topology.h
++++ linux/include/linux/topology.h
 @@ -50,10 +50,10 @@
  	for_each_online_node(node)						\
  		if (nr_cpus_node(node))
@@ -1052,10 +1097,10 @@
  				| BALANCE_FOR_PKG_POWER,\
  	.last_balance		= jiffies,		\
  	.balance_interval	= 1,			\
-Index: linux-cfs-2.6.22-git.q/init/main.c
+Index: linux/init/main.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/init/main.c
-+++ linux-cfs-2.6.22-git.q/init/main.c
+--- linux.orig/init/main.c
++++ linux/init/main.c
 @@ -436,15 +436,16 @@ static void noinline __init_refok rest_i
  
  	/*
@@ -1075,10 +1120,10 @@
  
  /* Check for early params. */
  static int __init do_early_param(char *param, char *val)
-Index: linux-cfs-2.6.22-git.q/kernel/delayacct.c
+Index: linux/kernel/delayacct.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/kernel/delayacct.c
-+++ linux-cfs-2.6.22-git.q/kernel/delayacct.c
+--- linux.orig/kernel/delayacct.c
++++ linux/kernel/delayacct.c
 @@ -99,9 +99,10 @@ void __delayacct_blkio_end(void)
  int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
  {
@@ -1106,10 +1151,10 @@
  	d->cpu_run_virtual_total =
  		(tmp < (s64)d->cpu_run_virtual_total) ?	0 : tmp;
  
-Index: linux-cfs-2.6.22-git.q/kernel/exit.c
+Index: linux/kernel/exit.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/kernel/exit.c
-+++ linux-cfs-2.6.22-git.q/kernel/exit.c
+--- linux.orig/kernel/exit.c
++++ linux/kernel/exit.c
 @@ -122,9 +122,9 @@ static void __exit_signal(struct task_st
  		sig->maj_flt += tsk->maj_flt;
  		sig->nvcsw += tsk->nvcsw;
@@ -1129,10 +1174,10 @@
  	write_unlock_irq(&tasklist_lock);
  	proc_flush_task(p);
  	release_thread(p);
-Index: linux-cfs-2.6.22-git.q/kernel/fork.c
+Index: linux/kernel/fork.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/kernel/fork.c
-+++ linux-cfs-2.6.22-git.q/kernel/fork.c
+--- linux.orig/kernel/fork.c
++++ linux/kernel/fork.c
 @@ -117,6 +117,7 @@ void __put_task_struct(struct task_struc
  	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
  	WARN_ON(atomic_read(&tsk->usage));
@@ -1159,10 +1204,10 @@
  #ifdef CONFIG_TASK_XACCT
  	p->rchar = 0;		/* I/O counter: bytes read */
  	p->wchar = 0;		/* I/O counter: bytes written */
-Index: linux-cfs-2.6.22-git.q/kernel/posix-cpu-timers.c
+Index: linux/kernel/posix-cpu-timers.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/kernel/posix-cpu-timers.c
-+++ linux-cfs-2.6.22-git.q/kernel/posix-cpu-timers.c
+--- linux.orig/kernel/posix-cpu-timers.c
++++ linux/kernel/posix-cpu-timers.c
 @@ -161,7 +161,7 @@ static inline cputime_t virt_ticks(struc
  }
  static inline unsigned long long sched_ns(struct task_struct *p)
@@ -1303,10 +1348,10 @@
  		return;
  
  #undef	UNEXPIRED
-Index: linux-cfs-2.6.22-git.q/kernel/sched.c
+Index: linux/kernel/sched.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/kernel/sched.c
-+++ linux-cfs-2.6.22-git.q/kernel/sched.c
+--- linux.orig/kernel/sched.c
++++ linux/kernel/sched.c
 @@ -16,6 +16,11 @@
   *		by Davide Libenzi, preemptible kernel bits by Robert Love.
   *  2003-09-03	Interactivity tuning by Con Kolivas.
@@ -1999,17 +2044,17 @@
 -static inline void
 -enqueue_task_head(struct task_struct *p, struct prio_array *array)
 +static void update_load_sub(struct load_weight *lw, unsigned long dec)
-+{
-+	lw->weight -= dec;
-+	lw->inv_weight = 0;
-+}
-+
-+static void __update_curr_load(struct rq *rq, struct load_stat *ls)
  {
 -	list_add(&p->run_list, array->queue + p->prio);
 -	__set_bit(p->prio, array->bitmap);
 -	array->nr_active++;
 -	p->array = array;
++	lw->weight -= dec;
++	lw->inv_weight = 0;
++}
++
++static void __update_curr_load(struct rq *rq, struct load_stat *ls)
++{
 +	if (rq->curr != rq->idle && ls->load.weight) {
 +		ls->delta_exec += ls->delta_stat;
 +		ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
@@ -2234,11 +2279,9 @@
  
  /*
 - * __activate_task - move a task to the runqueue.
-+ * activate_task - move a task to the runqueue.
-  */
+- */
 -static void __activate_task(struct task_struct *p, struct rq *rq)
-+static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
- {
+-{
 -	struct prio_array *target = rq->active;
 -
 -	if (batch_task(p))
@@ -2246,13 +2289,14 @@
 -	enqueue_task(p, target);
 -	inc_nr_running(p, rq);
 -}
-+	u64 now = rq_clock(rq);
- 
+-
 -/*
 - * __activate_idle_task - move idle task to the _front_ of runqueue.
-- */
++ * activate_task - move a task to the runqueue.
+  */
 -static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
--{
++static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+ {
 -	enqueue_task_head(p, rq->active);
 -	inc_nr_running(p, rq);
 -}
@@ -2318,7 +2362,8 @@
 -			 * and the higher the priority boost gets as well.
 -			 */
 -			p->sleep_avg += sleep_time;
--
++	u64 now = rq_clock(rq);
+ 
 -		}
 -		if (p->sleep_avg > NS_MAX_SLEEP_AVG)
 -			p->sleep_avg = NS_MAX_SLEEP_AVG;
@@ -2435,38 +2480,38 @@
 -		return;
 -
 -	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
-+	u64 now = rq_clock(rq);
- 
+-
 -	cpu = task_cpu(p);
 -	if (cpu == smp_processor_id())
 -		return;
-+	if (p->state == TASK_UNINTERRUPTIBLE)
-+		rq->nr_uninterruptible++;
- 
+-
 -	/* NEED_RESCHED must be visible before we test polling */
 -	smp_mb();
 -	if (!tsk_is_polling(p))
 -		smp_send_reschedule(cpu);
 -}
--
++	u64 now = rq_clock(rq);
+ 
 -static void resched_cpu(int cpu)
 -{
 -	struct rq *rq = cpu_rq(cpu);
 -	unsigned long flags;
--
++	if (p->state == TASK_UNINTERRUPTIBLE)
++		rq->nr_uninterruptible++;
+ 
 -	if (!spin_trylock_irqsave(&rq->lock, flags))
 -		return;
 -	resched_task(cpu_curr(cpu));
 -	spin_unlock_irqrestore(&rq->lock, flags);
--}
++	dequeue_task(rq, p, sleep, now);
++	dec_nr_running(p, rq, now);
+ }
 -#else
 -static inline void resched_task(struct task_struct *p)
 -{
 -	assert_spin_locked(&task_rq(p)->lock);
 -	set_tsk_need_resched(p);
-+	dequeue_task(rq, p, sleep, now);
-+	dec_nr_running(p, rq, now);
- }
+-}
 -#endif
  
  /**
@@ -2789,18 +2834,16 @@
   * wake_up_new_task - wake up a newly created task for the first time.
   *
   * This function will do some initial scheduler statistics housekeeping
-@@ -1752,110 +1548,35 @@ void fastcall sched_fork(struct task_str
+@@ -1752,108 +1548,33 @@ void fastcall sched_fork(struct task_str
   */
  void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
  {
 -	struct rq *rq, *this_rq;
  	unsigned long flags;
 -	int this_cpu, cpu;
-+	struct rq *rq;
-+	int this_cpu;
- 
- 	rq = task_rq_lock(p, &flags);
- 	BUG_ON(p->state != TASK_RUNNING);
+-
+-	rq = task_rq_lock(p, &flags);
+-	BUG_ON(p->state != TASK_RUNNING);
 -	this_cpu = smp_processor_id();
 -	cpu = task_cpu(p);
 -
@@ -2812,10 +2855,11 @@
 -	 */
 -	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
 -		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-+	this_cpu = smp_processor_id(); /* parent's CPU */
- 
- 	p->prio = effective_prio(p);
 -
+-	p->prio = effective_prio(p);
++	struct rq *rq;
++	int this_cpu;
+ 
 -	if (likely(cpu == this_cpu)) {
 -		if (!(clone_flags & CLONE_VM)) {
 -			/*
@@ -2846,7 +2890,10 @@
 -		this_rq = rq;
 -	} else {
 -		this_rq = cpu_rq(this_cpu);
--
++	rq = task_rq_lock(p, &flags);
++	BUG_ON(p->state != TASK_RUNNING);
++	this_cpu = smp_processor_id(); /* parent's CPU */
+ 
 -		/*
 -		 * Not the local CPU - must adjust timestamp. This should
 -		 * get optimised away in the !CONFIG_SMP case.
@@ -2856,19 +2903,29 @@
 -		__activate_task(p, rq);
 -		if (TASK_PREEMPTS_CURR(p, rq))
 -			resched_task(rq->curr);
--
--		/*
++	p->prio = effective_prio(p);
+ 
++	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
++			task_cpu(p) != this_cpu || !current->se.on_rq) {
++		activate_task(rq, p, 0);
++	} else {
+ 		/*
 -		 * Parent and child are on different CPUs, now get the
 -		 * parent runqueue to update the parent's ->sleep_avg:
--		 */
++		 * Let the scheduling class do new task startup
++		 * management (if any):
+ 		 */
 -		task_rq_unlock(rq, &flags);
 -		this_rq = task_rq_lock(current, &flags);
--	}
++		p->sched_class->task_new(rq, p);
+ 	}
 -	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
 -		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
 -	task_rq_unlock(this_rq, &flags);
--}
--
++	check_preempt_curr(rq, p);
++	task_rq_unlock(rq, &flags);
+ }
+ 
 -/*
 - * Potentially available exiting-child timeslices are
 - * retrieved here - this way the parent does not get
@@ -2879,7 +2936,8 @@
 - * was given away by the parent in the first place.)
 - */
 -void fastcall sched_exit(struct task_struct *p)
--{
++void sched_dead(struct task_struct *p)
+ {
 -	unsigned long flags;
 -	struct rq *rq;
 -
@@ -2897,29 +2955,11 @@
 -		p->parent->sleep_avg = p->parent->sleep_avg /
 -		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
 -		(EXIT_WEIGHT + 1);
-+
-+	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
-+			task_cpu(p) != this_cpu || !current->se.on_rq) {
-+		activate_task(rq, p, 0);
-+	} else {
-+		/*
-+		 * Let the scheduling class do new task startup
-+		 * management (if any):
-+		 */
-+		p->sched_class->task_new(rq, p);
-+	}
-+	check_preempt_curr(rq, p);
- 	task_rq_unlock(rq, &flags);
+-	task_rq_unlock(rq, &flags);
++	WARN_ON_ONCE(p->se.on_rq);
  }
  
-+void sched_dead(struct task_struct *p)
-+{
-+	WARN_ON_ONCE(p->se.on_rq);
-+}
-+
  /**
-  * prepare_task_switch - prepare to switch tasks
-  * @rq: the runqueue preparing to switch
 @@ -1911,13 +1632,13 @@ static inline void finish_task_switch(st
  	prev_state = prev->state;
  	finish_arch_switch(prev);
@@ -3556,7 +3596,7 @@
  	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  		return -1;
-@@ -2934,28 +2706,33 @@ out_balanced:
+@@ -2934,8 +2706,8 @@ out_balanced:
  static void idle_balance(int this_cpu, struct rq *this_rq)
  {
  	struct sched_domain *sd;
@@ -3566,30 +3606,10 @@
 +	unsigned long next_balance = jiffies + HZ;
  
  	for_each_domain(this_cpu, sd) {
--		if (sd->flags & SD_BALANCE_NEWIDLE) {
-+		unsigned long interval;
-+
-+		if (!(sd->flags & SD_LOAD_BALANCE))
-+			continue;
-+
-+		if (sd->flags & SD_BALANCE_NEWIDLE)
- 			/* If we've pulled tasks over stop searching: */
- 			pulled_task = load_balance_newidle(this_cpu,
--							this_rq, sd);
--			if (time_after(next_balance,
--				  sd->last_balance + sd->balance_interval))
--				next_balance = sd->last_balance
--						+ sd->balance_interval;
--			if (pulled_task)
--				break;
--		}
-+								this_rq, sd);
-+
-+		interval = msecs_to_jiffies(sd->balance_interval);
-+		if (time_after(next_balance, sd->last_balance + interval))
-+			next_balance = sd->last_balance + interval;
-+		if (pulled_task)
-+			break;
+ 		unsigned long interval;
+@@ -2954,12 +2726,13 @@ static void idle_balance(int this_cpu, s
+ 		if (pulled_task)
+ 			break;
  	}
 -	if (!pulled_task)
 +	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
@@ -3602,7 +3622,7 @@
  }
  
  /*
-@@ -2999,7 +2776,7 @@ static void active_load_balance(struct r
+@@ -3003,7 +2776,7 @@ static void active_load_balance(struct r
  		schedstat_inc(sd, alb_cnt);
  
  		if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
@@ -3611,7 +3631,7 @@
  			       NULL))
  			schedstat_inc(sd, alb_pushed);
  		else
-@@ -3008,32 +2785,6 @@ static void active_load_balance(struct r
+@@ -3012,32 +2785,6 @@ static void active_load_balance(struct r
  	spin_unlock(&target_rq->lock);
  }
  
@@ -3644,7 +3664,7 @@
  #ifdef CONFIG_NO_HZ
  static struct {
  	atomic_t load_balancer;
-@@ -3116,7 +2867,7 @@ static DEFINE_SPINLOCK(balancing);
+@@ -3120,7 +2867,7 @@ static DEFINE_SPINLOCK(balancing);
   *
   * Balancing parameters are set up in arch_init_sched_domains.
   */
@@ -3653,7 +3673,7 @@
  {
  	int balance = 1;
  	struct rq *rq = cpu_rq(cpu);
-@@ -3130,13 +2881,16 @@ static inline void rebalance_domains(int
+@@ -3134,13 +2881,16 @@ static inline void rebalance_domains(int
  			continue;
  
  		interval = sd->balance_interval;
@@ -3671,7 +3691,7 @@
  
  		if (sd->flags & SD_SERIALIZE) {
  			if (!spin_trylock(&balancing))
-@@ -3150,7 +2904,7 @@ static inline void rebalance_domains(int
+@@ -3154,7 +2904,7 @@ static inline void rebalance_domains(int
  				 * longer idle, or one of our SMT siblings is
  				 * not idle.
  				 */
@@ -3680,7 +3700,7 @@
  			}
  			sd->last_balance = jiffies;
  		}
-@@ -3180,7 +2934,8 @@ static void run_rebalance_domains(struct
+@@ -3184,7 +2934,8 @@ static void run_rebalance_domains(struct
  {
  	int local_cpu = smp_processor_id();
  	struct rq *local_rq = cpu_rq(local_cpu);
@@ -3690,7 +3710,7 @@
  
  	rebalance_domains(local_cpu, idle);
  
-@@ -3223,9 +2978,8 @@ static void run_rebalance_domains(struct
+@@ -3227,9 +2978,8 @@ static void run_rebalance_domains(struct
   * idle load balancing owner or decide to stop the periodic load balancing,
   * if the whole system is idle.
   */
@@ -3701,7 +3721,7 @@
  #ifdef CONFIG_NO_HZ
  	/*
  	 * If we were in the nohz mode recently and busy at the current
-@@ -3277,68 +3031,45 @@ static inline void trigger_load_balance(
+@@ -3281,68 +3031,45 @@ static inline void trigger_load_balance(
  	if (time_after_eq(jiffies, rq->next_balance))
  		raise_softirq(SCHED_SOFTIRQ);
  }
@@ -3788,7 +3808,7 @@
   * Account user cpu time to a process.
   * @p: the process that the cpu time gets accounted to
   * @hardirq_offset: the offset to subtract from hardirq_count()
-@@ -3411,81 +3142,6 @@ void account_steal_time(struct task_stru
+@@ -3415,81 +3142,6 @@ void account_steal_time(struct task_stru
  		cpustat->steal = cputime64_add(cpustat->steal, tmp);
  }
  
@@ -3870,7 +3890,7 @@
  /*
   * This function gets called by the timer code, with HZ frequency.
   * We call it with interrupts disabled.
-@@ -3495,20 +3151,19 @@ out_unlock:
+@@ -3499,20 +3151,19 @@ out_unlock:
   */
  void scheduler_tick(void)
  {
@@ -3899,7 +3919,7 @@
  #endif
  }
  
-@@ -3550,170 +3205,129 @@ EXPORT_SYMBOL(sub_preempt_count);
+@@ -3554,170 +3205,129 @@ EXPORT_SYMBOL(sub_preempt_count);
  
  #endif
  
@@ -4152,7 +4172,7 @@
  	preempt_enable_no_resched();
  	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
  		goto need_resched;
-@@ -4125,29 +3739,30 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -4129,29 +3739,30 @@ EXPORT_SYMBOL(sleep_on_timeout);
   */
  void rt_mutex_setprio(struct task_struct *p, int prio)
  {
@@ -4196,7 +4216,7 @@
  		/*
  		 * Reschedule if we are currently running on this runqueue and
  		 * our priority decreased, or if we are not currently running on
-@@ -4156,8 +3771,9 @@ void rt_mutex_setprio(struct task_struct
+@@ -4160,8 +3771,9 @@ void rt_mutex_setprio(struct task_struct
  		if (task_running(rq, p)) {
  			if (p->prio > oldprio)
  				resched_task(rq->curr);
@@ -4208,7 +4228,7 @@
  	}
  	task_rq_unlock(rq, &flags);
  }
-@@ -4166,10 +3782,10 @@ void rt_mutex_setprio(struct task_struct
+@@ -4170,10 +3782,10 @@ void rt_mutex_setprio(struct task_struct
  
  void set_user_nice(struct task_struct *p, long nice)
  {
@@ -4221,7 +4241,7 @@
  
  	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
  		return;
-@@ -4178,20 +3794,21 @@ void set_user_nice(struct task_struct *p
+@@ -4182,20 +3794,21 @@ void set_user_nice(struct task_struct *p
  	 * the task might be in the middle of scheduling on another CPU.
  	 */
  	rq = task_rq_lock(p, &flags);
@@ -4248,7 +4268,7 @@
  	}
  
  	p->static_prio = NICE_TO_PRIO(nice);
-@@ -4200,9 +3817,9 @@ void set_user_nice(struct task_struct *p
+@@ -4204,9 +3817,9 @@ void set_user_nice(struct task_struct *p
  	p->prio = effective_prio(p);
  	delta = p->prio - old_prio;
  
@@ -4261,7 +4281,7 @@
  		/*
  		 * If the task increased its priority or is running and
  		 * lowered its priority, then reschedule its CPU:
-@@ -4322,20 +3939,29 @@ static inline struct task_struct *find_p
+@@ -4326,20 +3939,29 @@ static inline struct task_struct *find_p
  }
  
  /* Actually do priority change: must hold rq lock. */
@@ -4299,7 +4319,7 @@
  	set_load_weight(p);
  }
  
-@@ -4350,8 +3976,7 @@ static void __setscheduler(struct task_s
+@@ -4354,8 +3976,7 @@ static void __setscheduler(struct task_s
  int sched_setscheduler(struct task_struct *p, int policy,
  		       struct sched_param *param)
  {
@@ -4309,7 +4329,7 @@
  	unsigned long flags;
  	struct rq *rq;
  
-@@ -4362,12 +3987,13 @@ recheck:
+@@ -4366,12 +3987,13 @@ recheck:
  	if (policy < 0)
  		policy = oldpolicy = p->policy;
  	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
@@ -4326,7 +4346,7 @@
  	 */
  	if (param->sched_priority < 0 ||
  	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
-@@ -4398,6 +4024,12 @@ recheck:
+@@ -4402,6 +4024,12 @@ recheck:
  			    param->sched_priority > rlim_rtprio)
  				return -EPERM;
  		}
@@ -4339,7 +4359,7 @@
  
  		/* can't change other user's priorities */
  		if ((current->euid != p->euid) &&
-@@ -4425,13 +4057,13 @@ recheck:
+@@ -4429,13 +4057,13 @@ recheck:
  		spin_unlock_irqrestore(&p->pi_lock, flags);
  		goto recheck;
  	}
@@ -4359,7 +4379,7 @@
  		/*
  		 * Reschedule if we are currently running on this runqueue and
  		 * our priority decreased, or if we are not currently running on
-@@ -4440,8 +4072,9 @@ recheck:
+@@ -4444,8 +4072,9 @@ recheck:
  		if (task_running(rq, p)) {
  			if (p->prio > oldprio)
  				resched_task(rq->curr);
@@ -4371,7 +4391,7 @@
  	}
  	__task_rq_unlock(rq);
  	spin_unlock_irqrestore(&p->pi_lock, flags);
-@@ -4713,41 +4346,18 @@ asmlinkage long sys_sched_getaffinity(pi
+@@ -4717,41 +4346,18 @@ asmlinkage long sys_sched_getaffinity(pi
  /**
   * sys_sched_yield - yield the current processor to other threads.
   *
@@ -4418,7 +4438,7 @@
  
  	/*
  	 * Since we are going to call schedule() anyway, there's
-@@ -4898,6 +4508,8 @@ asmlinkage long sys_sched_get_priority_m
+@@ -4902,6 +4508,8 @@ asmlinkage long sys_sched_get_priority_m
  		break;
  	case SCHED_NORMAL:
  	case SCHED_BATCH:
@@ -4427,7 +4447,7 @@
  		ret = 0;
  		break;
  	}
-@@ -4922,6 +4534,8 @@ asmlinkage long sys_sched_get_priority_m
+@@ -4926,6 +4534,8 @@ asmlinkage long sys_sched_get_priority_m
  		break;
  	case SCHED_NORMAL:
  	case SCHED_BATCH:
@@ -4436,7 +4456,7 @@
  		ret = 0;
  	}
  	return ret;
-@@ -4956,7 +4570,7 @@ long sys_sched_rr_get_interval(pid_t pid
+@@ -4960,7 +4570,7 @@ long sys_sched_rr_get_interval(pid_t pid
  		goto out_unlock;
  
  	jiffies_to_timespec(p->policy == SCHED_FIFO ?
@@ -4445,7 +4465,7 @@
  	read_unlock(&tasklist_lock);
  	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
  out_nounlock:
-@@ -5037,6 +4651,12 @@ void show_state_filter(unsigned long sta
+@@ -5041,6 +4651,12 @@ void show_state_filter(unsigned long sta
  	 */
  	if (state_filter == -1)
  		debug_show_all_locks();
@@ -4458,7 +4478,7 @@
  }
  
  /**
-@@ -5052,13 +4672,12 @@ void __cpuinit init_idle(struct task_str
+@@ -5056,13 +4672,12 @@ void __cpuinit init_idle(struct task_str
  	struct rq *rq = cpu_rq(cpu);
  	unsigned long flags;
  
@@ -4476,7 +4496,7 @@
  
  	spin_lock_irqsave(&rq->lock, flags);
  	rq->curr = rq->idle = idle;
-@@ -5073,6 +4692,10 @@ void __cpuinit init_idle(struct task_str
+@@ -5077,6 +4692,10 @@ void __cpuinit init_idle(struct task_str
  #else
  	task_thread_info(idle)->preempt_count = 0;
  #endif
@@ -4487,7 +4507,7 @@
  }
  
  /*
-@@ -5084,6 +4707,27 @@ void __cpuinit init_idle(struct task_str
+@@ -5088,6 +4707,27 @@ void __cpuinit init_idle(struct task_str
   */
  cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
  
@@ -4515,7 +4535,7 @@
  #ifdef CONFIG_SMP
  /*
   * This is how migration works:
-@@ -5157,7 +4801,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
+@@ -5161,7 +4801,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
  static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  {
  	struct rq *rq_dest, *rq_src;
@@ -4524,7 +4544,7 @@
  
  	if (unlikely(cpu_is_offline(dest_cpu)))
  		return ret;
-@@ -5173,20 +4817,13 @@ static int __migrate_task(struct task_st
+@@ -5177,20 +4817,13 @@ static int __migrate_task(struct task_st
  	if (!cpu_isset(dest_cpu, p->cpus_allowed))
  		goto out;
  
@@ -4551,7 +4571,7 @@
  	}
  	ret = 1;
  out:
-@@ -5338,7 +4975,8 @@ static void migrate_live_tasks(int src_c
+@@ -5342,7 +4975,8 @@ static void migrate_live_tasks(int src_c
  	write_unlock_irq(&tasklist_lock);
  }
  
@@ -4561,7 +4581,7 @@
   * It does so by boosting its priority to highest possible and adding it to
   * the _front_ of the runqueue. Used by CPU offline code.
   */
-@@ -5358,10 +4996,10 @@ void sched_idle_next(void)
+@@ -5362,10 +4996,10 @@ void sched_idle_next(void)
  	 */
  	spin_lock_irqsave(&rq->lock, flags);
  
@@ -4574,7 +4594,7 @@
  
  	spin_unlock_irqrestore(&rq->lock, flags);
  }
-@@ -5411,16 +5049,15 @@ static void migrate_dead(unsigned int de
+@@ -5415,16 +5049,15 @@ static void migrate_dead(unsigned int de
  static void migrate_dead_tasks(unsigned int dead_cpu)
  {
  	struct rq *rq = cpu_rq(dead_cpu);
@@ -4599,7 +4619,7 @@
  	}
  }
  #endif /* CONFIG_HOTPLUG_CPU */
-@@ -5451,7 +5088,7 @@ migration_call(struct notifier_block *nf
+@@ -5455,7 +5088,7 @@ migration_call(struct notifier_block *nf
  		kthread_bind(p, cpu);
  		/* Must be high prio: stop_machine expects to yield to it. */
  		rq = task_rq_lock(p, &flags);
@@ -4608,7 +4628,7 @@
  		task_rq_unlock(rq, &flags);
  		cpu_rq(cpu)->migration_thread = p;
  		break;
-@@ -5482,9 +5119,9 @@ migration_call(struct notifier_block *nf
+@@ -5486,9 +5119,9 @@ migration_call(struct notifier_block *nf
  		rq->migration_thread = NULL;
  		/* Idle task back to normal (off runqueue, low prio) */
  		rq = task_rq_lock(rq->idle, &flags);
@@ -4620,7 +4640,7 @@
  		migrate_dead_tasks(cpu);
  		task_rq_unlock(rq, &flags);
  		migrate_nr_uninterruptible(rq);
-@@ -5793,483 +5430,6 @@ init_sched_build_groups(cpumask_t span, 
+@@ -5797,483 +5430,6 @@ init_sched_build_groups(cpumask_t span, 
  
  #define SD_NODES_PER_DOMAIN 16
  
@@ -5104,7 +5124,7 @@
  #ifdef CONFIG_NUMA
  
  /**
-@@ -6799,10 +5959,6 @@ static int build_sched_domains(const cpu
+@@ -6803,10 +5959,6 @@ static int build_sched_domains(const cpu
  #endif
  		cpu_attach_domain(sd, i);
  	}
@@ -5115,7 +5135,7 @@
  
  	return 0;
  
-@@ -7009,10 +6165,12 @@ void __init sched_init_smp(void)
+@@ -7013,10 +6165,12 @@ void __init sched_init_smp(void)
  	/* Move init over to a non-isolated CPU */
  	if (set_cpus_allowed(current, non_isolated_cpus) < 0)
  		BUG();
@@ -5128,7 +5148,7 @@
  }
  #endif /* CONFIG_SMP */
  
-@@ -7028,8 +6186,15 @@ int in_sched_functions(unsigned long add
+@@ -7032,8 +6186,15 @@ int in_sched_functions(unsigned long add
  
  void __init sched_init(void)
  {
@@ -5145,7 +5165,7 @@
  
  	for_each_possible_cpu(i) {
  		struct prio_array *array;
-@@ -7039,15 +6204,17 @@ void __init sched_init(void)
+@@ -7043,15 +6204,17 @@ void __init sched_init(void)
  		spin_lock_init(&rq->lock);
  		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
  		rq->nr_running = 0;
@@ -5168,7 +5188,7 @@
  		rq->push_cpu = 0;
  		rq->cpu = i;
  		rq->migration_thread = NULL;
-@@ -7055,16 +6222,14 @@ void __init sched_init(void)
+@@ -7059,16 +6222,14 @@ void __init sched_init(void)
  #endif
  		atomic_set(&rq->nr_iowait, 0);
  
@@ -5191,7 +5211,7 @@
  	}
  
  	set_load_weight(&init_task);
-@@ -7091,6 +6256,10 @@ void __init sched_init(void)
+@@ -7095,6 +6256,10 @@ void __init sched_init(void)
  	 * when this runqueue becomes "idle".
  	 */
  	init_idle(current, smp_processor_id());
@@ -5202,7 +5222,7 @@
  }
  
  #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
-@@ -7121,29 +6290,55 @@ EXPORT_SYMBOL(__might_sleep);
+@@ -7125,29 +6290,55 @@ EXPORT_SYMBOL(__might_sleep);
  #ifdef CONFIG_MAGIC_SYSRQ
  void normalize_rt_tasks(void)
  {
@@ -5268,10 +5288,10 @@
  		__task_rq_unlock(rq);
  		spin_unlock_irqrestore(&p->pi_lock, flags);
  	} while_each_thread(g, p);
-Index: linux-cfs-2.6.22-git.q/kernel/sched_debug.c
+Index: linux/kernel/sched_debug.c
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-git.q/kernel/sched_debug.c
++++ linux/kernel/sched_debug.c
 @@ -0,0 +1,260 @@
 +/*
 + * kernel/time/sched_debug.c
@@ -5432,7 +5452,7 @@
 +	u64 now = ktime_to_ns(ktime_get());
 +	int cpu;
 +
-+	SEQ_printf(m, "Sched Debug Version: v0.03, %s %.*s\n",
++	SEQ_printf(m, "Sched Debug Version: v0.03, cfs-v18, %s %.*s\n",
 +		init_utsname()->release,
 +		(int)strcspn(init_utsname()->version, " "),
 +		init_utsname()->version);
@@ -5533,11 +5553,11 @@
 +	p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
 +	p->se.sum_exec_runtime = 0;
 +}
-Index: linux-cfs-2.6.22-git.q/kernel/sched_fair.c
+Index: linux/kernel/sched_fair.c
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-git.q/kernel/sched_fair.c
-@@ -0,0 +1,883 @@
++++ linux/kernel/sched_fair.c
+@@ -0,0 +1,884 @@
 +/*
 + * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 + *
@@ -5882,8 +5902,9 @@
 +		s64 tmp;
 +
 +		if (se->wait_runtime < 0) {
-+			tmp = (0 - se->wait_runtime) << NICE_0_SHIFT;
-+			key += (tmp * se->load.inv_weight) >> WMULT_SHIFT;
++			tmp = -se->wait_runtime;
++			key += (tmp * se->load.inv_weight) >>
++					(WMULT_SHIFT - NICE_0_SHIFT);
 +		} else {
 +			tmp = se->wait_runtime * se->load.weight;
 +			key -= tmp >> NICE_0_SHIFT;
@@ -6421,10 +6442,10 @@
 +	.task_tick		= task_tick_fair,
 +	.task_new		= task_new_fair,
 +};
-Index: linux-cfs-2.6.22-git.q/kernel/sched_idletask.c
+Index: linux/kernel/sched_idletask.c
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-git.q/kernel/sched_idletask.c
++++ linux/kernel/sched_idletask.c
 @@ -0,0 +1,68 @@
 +/*
 + * idle-task scheduling class.
@@ -6494,10 +6515,10 @@
 +	.task_tick		= task_tick_idle,
 +	/* no .task_new for idle tasks */
 +};
-Index: linux-cfs-2.6.22-git.q/kernel/sched_rt.c
+Index: linux/kernel/sched_rt.c
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-git.q/kernel/sched_rt.c
++++ linux/kernel/sched_rt.c
 @@ -0,0 +1,215 @@
 +/*
 + * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
@@ -6714,10 +6735,10 @@
 +	.task_tick		= task_tick_rt,
 +	.task_new		= task_new_rt,
 +};
-Index: linux-cfs-2.6.22-git.q/kernel/sched_stats.h
+Index: linux/kernel/sched_stats.h
 ===================================================================
 --- /dev/null
-+++ linux-cfs-2.6.22-git.q/kernel/sched_stats.h
++++ linux/kernel/sched_stats.h
 @@ -0,0 +1,235 @@
 +
 +#ifdef CONFIG_SCHEDSTATS
@@ -6954,10 +6975,10 @@
 +#define sched_info_switch(t, next)	do { } while (0)
 +#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 +
-Index: linux-cfs-2.6.22-git.q/kernel/softirq.c
+Index: linux/kernel/softirq.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/kernel/softirq.c
-+++ linux-cfs-2.6.22-git.q/kernel/softirq.c
+--- linux.orig/kernel/softirq.c
++++ linux/kernel/softirq.c
 @@ -488,7 +488,6 @@ void __init softirq_init(void)
  
  static int ksoftirqd(void * __bind_cpu)
@@ -6966,10 +6987,10 @@
  	current->flags |= PF_NOFREEZE;
  
  	set_current_state(TASK_INTERRUPTIBLE);
-Index: linux-cfs-2.6.22-git.q/kernel/sysctl.c
+Index: linux/kernel/sysctl.c
 ===================================================================
---- linux-cfs-2.6.22-git.q.orig/kernel/sysctl.c
-+++ linux-cfs-2.6.22-git.q/kernel/sysctl.c
+--- linux.orig/kernel/sysctl.c
++++ linux/kernel/sysctl.c
 @@ -206,8 +206,84 @@ static ctl_table root_table[] = {
  	{ .ctl_name = 0 }
  };
@@ -7055,20 +7076,3 @@
  		.ctl_name	= KERN_PANIC,
  		.procname	= "panic",
  		.data		= &panic_timeout,
-Index: linux/kernel/sched_fair.c
-===================================================================
---- linux.orig/kernel/sched_fair.c
-+++ linux/kernel/sched_fair.c
-@@ -342,8 +342,9 @@ update_stats_enqueue(struct cfs_rq *cfs_
- 		s64 tmp;
- 
- 		if (se->wait_runtime < 0) {
--			tmp = (0 - se->wait_runtime) << NICE_0_SHIFT;
--			key += (tmp * se->load.inv_weight) >> WMULT_SHIFT;
-+			tmp = -se->wait_runtime;
-+			key += (tmp * se->load.inv_weight) >>
-+					(WMULT_SHIFT - NICE_0_SHIFT);
- 		} else {
- 			tmp = se->wait_runtime * se->load.weight;
- 			key -= tmp >> NICE_0_SHIFT;
-


Index: sources
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/sources,v
retrieving revision 1.600
retrieving revision 1.601
diff -u -r1.600 -r1.601
--- sources	24 Jun 2007 03:42:12 -0000	1.600
+++ sources	25 Jun 2007 23:52:57 -0000	1.601
@@ -1,3 +1,2 @@
 1b515f588078dfa7f4bab2634bd17e80  linux-2.6.21.tar.bz2
-807de5a9464e23dfc6336ddc1c07c24f  patch-2.6.22-rc5.bz2
-5051671283dc0ded75c5952c896fbc54  patch-2.6.22-rc5-git8.bz2
+8bb087bc36b73dd523356fc6b0a5bd34  patch-2.6.22-rc6.bz2
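
Each line of the sources file pairs an MD5 checksum with an upstream
tarball or patch, using the two-space "hash  filename" layout that md5sum
checklists use, so downloaded sources can be checked directly, e.g.:

    # run in a directory containing the listed files
    md5sum -c sources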


Index: upstream
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/upstream,v
retrieving revision 1.522
retrieving revision 1.523
diff -u -r1.522 -r1.523
--- upstream	24 Jun 2007 03:42:12 -0000	1.522
+++ upstream	25 Jun 2007 23:52:57 -0000	1.523
@@ -1,3 +1,2 @@
 linux-2.6.21.tar.bz2
-patch-2.6.22-rc5.bz2
-patch-2.6.22-rc5-git8.bz2
+patch-2.6.22-rc6.bz2
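
The upstream file simply names the canonical kernel.org sources the
package is built from, so build scripts can fetch them in a loop. A
hedged sketch of such a fetch loop (the URLs reflect the 2007 kernel.org
layout, where -rc patches live under v2.6/testing/; the package's real
download target may differ):

    for f in $(cat upstream); do
        case "$f" in
        *-rc*) wget "http://www.kernel.org/pub/linux/kernel/v2.6/testing/$f" ;;
        *)     wget "http://www.kernel.org/pub/linux/kernel/v2.6/$f" ;;
        esac
    done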


--- patch-2.6.22-rc5-git8.bz2.sign DELETED ---


--- patch-2.6.22-rc5.bz2.sign DELETED ---



