rpms/kernel/F-7 linux-2.6-utrace-core.patch, 1.2, 1.3 linux-2.6-utrace-ptrace-compat-avr32.patch, NONE, 1.1 linux-2.6-utrace-ptrace-compat-ia64.patch, 1.2, 1.3 linux-2.6-utrace-ptrace-compat-s390.patch, 1.2, 1.3 linux-2.6-utrace-ptrace-compat-sparc64.patch, 1.2, 1.3 linux-2.6-utrace-ptrace-compat.patch, 1.2, 1.3 linux-2.6-utrace-regset-avr32.patch, NONE, 1.1 linux-2.6-utrace-regset-ia64.patch, 1.2, 1.3 linux-2.6-utrace-regset-s390.patch, 1.2, 1.3 linux-2.6-utrace-regset-sparc64.patch, 1.2, 1.3 linux-2.6-utrace-regset.patch, 1.2, 1.3 linux-2.6-utrace-tracehook-avr32.patch, NONE, 1.1 linux-2.6-utrace-tracehook-ia64.patch, 1.2, 1.3 linux-2.6-utrace-tracehook-s390.patch, 1.2, 1.3 linux-2.6-utrace-tracehook-sparc64.patch, 1.2, 1.3 linux-2.6-utrace-tracehook-um.patch, 1.2, 1.3 linux-2.6-utrace-tracehook.patch, 1.2, 1.3 kernel-2.6.spec, 1.3287, 1.3288 linux-2.6-sched-cfs.patch, 1.6, 1.7 linux-2.6-utrace.patch, 1.60, NONE
Chuck Ebbert (cebbert)
fedora-extras-commits at redhat.com
Fri Jul 20 18:48:40 UTC 2007
Author: cebbert
Update of /cvs/pkgs/rpms/kernel/F-7
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv15111
Modified Files:
kernel-2.6.spec linux-2.6-sched-cfs.patch
Added Files:
linux-2.6-utrace-core.patch
linux-2.6-utrace-ptrace-compat-avr32.patch
linux-2.6-utrace-ptrace-compat-ia64.patch
linux-2.6-utrace-ptrace-compat-s390.patch
linux-2.6-utrace-ptrace-compat-sparc64.patch
linux-2.6-utrace-ptrace-compat.patch
linux-2.6-utrace-regset-avr32.patch
linux-2.6-utrace-regset-ia64.patch
linux-2.6-utrace-regset-s390.patch
linux-2.6-utrace-regset-sparc64.patch
linux-2.6-utrace-regset.patch
linux-2.6-utrace-tracehook-avr32.patch
linux-2.6-utrace-tracehook-ia64.patch
linux-2.6-utrace-tracehook-s390.patch
linux-2.6-utrace-tracehook-sparc64.patch
linux-2.6-utrace-tracehook-um.patch
linux-2.6-utrace-tracehook.patch
Removed Files:
linux-2.6-utrace.patch
Log Message:
* Fri Jul 20 2007 Chuck Ebbert <cebbert at redhat.com>
- utrace update
- CFS scheduler update
linux-2.6-utrace-core.patch:
View full diff with command:
/usr/bin/cvs -f diff -kk -u -N -r 1.2 -r 1.3 linux-2.6-utrace-core.patch
Index: linux-2.6-utrace-core.patch
===================================================================
RCS file: linux-2.6-utrace-core.patch
diff -N linux-2.6-utrace-core.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-core.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,3811 @@
+[PATCH 3] utrace core
+
+This adds the utrace facility, a new modular interface in the kernel for
+implementing user thread tracing and debugging. This fits on top of the
+tracehook_* layer, so the new code is well-isolated.
+
+The new interface is in <linux/utrace.h>, and Documentation/utrace.txt
+describes it. It allows for multiple separate tracing engines to work in
+parallel without interfering with each other. Higher-level tracing
+facilities can be implemented as loadable kernel modules using this layer.
+
+The new facility is made optional under CONFIG_UTRACE.
+Normal configurations will always want to enable it.
+It's optional to emphasize the clean separation of the code,
+and in case some stripped-down embedded configurations might want to
+omit it to save space (when ptrace and the like can never be used).
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+
+---
+
+ Documentation/DocBook/Makefile | 2
+ Documentation/DocBook/utrace.tmpl | 23
+ Documentation/utrace.txt | 579 +++++++++
+ include/linux/sched.h | 5
+ include/linux/tracehook.h | 85 +
+ include/linux/utrace.h | 544 +++++++++
+ init/Kconfig | 18
+ kernel/Makefile | 1
+ kernel/utrace.c | 2263 ++++++++++++++++++++++++++++++++++++++
+ 9 files changed, 3502 insertions(+), 18 deletions(-)
+ create kernel/utrace.c
+ create Documentation/utrace.txt
+ create Documentation/DocBook/utrace.tmpl
+ create include/linux/utrace.h
+
+Index: b/kernel/Makefile
+===================================================================
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -51,6 +51,7 @@ obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
+ obj-$(CONFIG_UTS_NS) += utsname.o
+ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
+ obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
++obj-$(CONFIG_UTRACE) += utrace.o
+
+ ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
+ # According to Alan Modra <alan at linuxcare.com.au>, the -fno-omit-frame-pointer is
+Index: b/kernel/utrace.c
+===================================================================
+--- /dev/null
++++ b/kernel/utrace.c
+@@ -0,0 +1,2263 @@
++/*
++ * utrace infrastructure interface for debugging user processes
++ *
++ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ */
++
++#include <linux/utrace.h>
++#include <linux/tracehook.h>
++#include <linux/err.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <asm/tracehook.h>
++
++
++#define UTRACE_DEBUG 1
++#ifdef UTRACE_DEBUG
++#define CHECK_INIT(p) atomic_set(&(p)->check_dead, 1)
++#define CHECK_DEAD(p) BUG_ON(!atomic_dec_and_test(&(p)->check_dead))
++#else
++#define CHECK_INIT(p) do { } while (0)
++#define CHECK_DEAD(p) do { } while (0)
++#endif
++
++/*
++ * Per-thread structure task_struct.utrace points to.
++ *
++ * The task itself never has to worry about this going away after
++ * some event is found set in task_struct.utrace_flags.
++ * Once created, this pointer is changed only when the task is quiescent
++ * (TASK_TRACED or TASK_STOPPED with the siglock held, or dead).
++ *
++ * For other parties, the pointer to this is protected by RCU and
++ * task_lock. Since call_rcu is never used while the thread is alive and
++ * using this struct utrace, we can overlay the RCU data structure used
++ * only for a dead struct with some local state used only for a live utrace
++ * on an active thread.
++ */
++struct utrace
++{
++ union {
++ struct rcu_head dead;
++ struct {
++ struct task_struct *cloning;
++ struct utrace_signal *signal;
++ } live;
++ struct {
++ unsigned long flags;
++ } exit;
++ } u;
++
++ struct list_head engines;
++ spinlock_t lock;
++#ifdef UTRACE_DEBUG
++ atomic_t check_dead;
++#endif
++};
++
++static struct kmem_cache *utrace_cachep;
++static struct kmem_cache *utrace_engine_cachep;
++
++static int __init
++utrace_init(void)
++{
++ utrace_cachep =
++ kmem_cache_create("utrace_cache",
++ sizeof(struct utrace), 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
++ utrace_engine_cachep =
++ kmem_cache_create("utrace_engine_cache",
++ sizeof(struct utrace_attached_engine), 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
++ return 0;
++}
++subsys_initcall(utrace_init);
++
++
++/*
++ * Make sure target->utrace is allocated, and return with it locked on
++ * success. This function mediates startup races. The creating parent
++ * task has priority, and other callers will delay here to let its call
++ * succeed and take the new utrace lock first.
++ */
++static struct utrace *
++utrace_first_engine(struct task_struct *target,
++ struct utrace_attached_engine *engine)
++ __acquires(utrace->lock)
++{
++ struct utrace *utrace;
++
++ /*
++ * If this is a newborn thread and we are not the creator,
++ * we have to wait for it. The creator gets the first chance
++ * to attach. The PF_STARTING flag is cleared after its
++ * report_clone hook has had a chance to run.
++ */
++ if (target->flags & PF_STARTING) {
++ utrace = current->utrace;
++ if (utrace == NULL || utrace->u.live.cloning != target) {
++ yield();
++ return (signal_pending(current)
++ ? ERR_PTR(-ERESTARTNOINTR) : NULL);
++ }
++ }
++
++ utrace = kmem_cache_alloc(utrace_cachep, GFP_KERNEL);
++ if (unlikely(utrace == NULL))
++ return ERR_PTR(-ENOMEM);
++
++ utrace->u.live.cloning = NULL;
++ utrace->u.live.signal = NULL;
++ INIT_LIST_HEAD(&utrace->engines);
++ list_add(&engine->entry, &utrace->engines);
++ spin_lock_init(&utrace->lock);
++ CHECK_INIT(utrace);
++
++ spin_lock(&utrace->lock);
++ task_lock(target);
++ if (likely(target->utrace == NULL)) {
++ rcu_assign_pointer(target->utrace, utrace);
++
++ /*
++ * The task_lock protects us against another thread doing
++ * the same thing. We might still be racing against
++ * tracehook_release_task. It's called with ->exit_state
++ * set to EXIT_DEAD and then checks ->utrace with an
++ * smp_mb() in between. If EXIT_DEAD is set, then
++ * release_task might have checked ->utrace already and saw
++ * it NULL; we can't attach. If we see EXIT_DEAD not yet
++ * set after our barrier, then we know release_task will
++ * see our target->utrace pointer.
++ */
++ smp_mb();
[...3418 lines suppressed...]
++ struct task_struct *(*tracer_task)(struct utrace_attached_engine *,
++ struct task_struct *target);
++};
++
++
++/*
++ * These are the exported entry points for tracing engines to use.
++ */
++struct utrace_attached_engine *utrace_attach(struct task_struct *target,
++ int flags,
++ const struct utrace_engine_ops *,
++ void *data);
++int utrace_detach(struct task_struct *target,
++ struct utrace_attached_engine *engine);
++int utrace_set_flags(struct task_struct *target,
++ struct utrace_attached_engine *engine,
++ unsigned long flags);
++int utrace_inject_signal(struct task_struct *target,
++ struct utrace_attached_engine *engine,
++ u32 action, siginfo_t *info,
++ const struct k_sigaction *ka);
++const struct utrace_regset *utrace_regset(struct task_struct *target,
++ struct utrace_attached_engine *,
++ const struct utrace_regset_view *,
++ int which);
++
++
++/*
++ * Hooks in <linux/tracehook.h> call these entry points to the utrace dispatch.
++ */
++int utrace_quiescent(struct task_struct *, struct utrace_signal *);
++void utrace_release_task(struct task_struct *);
++int utrace_get_signal(struct task_struct *, struct pt_regs *,
++ siginfo_t *, struct k_sigaction *);
++void utrace_report_clone(unsigned long clone_flags, struct task_struct *child);
++void utrace_report_vfork_done(pid_t child_pid);
++void utrace_report_exit(long *exit_code);
++void utrace_report_death(struct task_struct *, struct utrace *);
++void utrace_report_delayed_group_leader(struct task_struct *);
++int utrace_report_jctl(int type);
++void utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs);
++void utrace_report_syscall(struct pt_regs *regs, int is_exit);
++struct task_struct *utrace_tracer_task(struct task_struct *);
++int utrace_allow_access_process_vm(struct task_struct *);
++int utrace_unsafe_exec(struct task_struct *);
++void utrace_signal_handler_singlestep(struct task_struct *, struct pt_regs *);
++
++/*
++ * <linux/tracehook.h> uses these accessors to avoid #ifdef CONFIG_UTRACE.
++ */
++static inline unsigned long tsk_utrace_flags(struct task_struct *tsk)
++{
++ return tsk->utrace_flags;
++}
++static inline struct utrace *tsk_utrace_struct(struct task_struct *tsk)
++{
++ return tsk->utrace;
++}
++static inline void utrace_init_task(struct task_struct *child)
++{
++ child->utrace_flags = 0;
++ child->utrace = NULL;
++}
++
++#else /* !CONFIG_UTRACE */
++
++static unsigned long tsk_utrace_flags(struct task_struct *tsk)
++{
++ return 0;
++}
++static struct utrace *tsk_utrace_struct(struct task_struct *tsk)
++{
++ return NULL;
++}
++static inline void utrace_init_task(struct task_struct *child)
++{
++}
++
++/*
++ * The calls to these should all be in if (0) and optimized out entirely.
++ * We have stubs here only so tracehook.h doesn't need to #ifdef them
++ * to avoid external references in case of unoptimized compilation.
++ */
++static inline int utrace_quiescent(struct task_struct *tsk, void *ignored)
++{
++ BUG();
++ return 0;
++}
++static inline void utrace_release_task(struct task_struct *tsk)
++{
++ BUG();
++}
++static inline int utrace_get_signal(struct task_struct *tsk,
++ struct pt_regs *regs,
++ siginfo_t *info, struct k_sigaction *ka)
++{
++ BUG();
++ return 0;
++}
++static inline void utrace_report_clone(unsigned long clone_flags,
++ struct task_struct *child)
++{
++ BUG();
++}
++static inline void utrace_report_vfork_done(pid_t child_pid)
++{
++ BUG();
++}
++static inline void utrace_report_exit(long *exit_code)
++{
++ BUG();
++}
++static inline void utrace_report_death(struct task_struct *tsk, void *ignored)
++{
++ BUG();
++}
++static inline void utrace_report_delayed_group_leader(struct task_struct *tsk)
++{
++ BUG();
++}
++static inline int utrace_report_jctl(int type)
++{
++ BUG();
++ return 0;
++}
++static inline void utrace_report_exec(struct linux_binprm *bprm,
++ struct pt_regs *regs)
++{
++ BUG();
++}
++static inline void utrace_report_syscall(struct pt_regs *regs, int is_exit)
++{
++ BUG();
++}
++static inline struct task_struct *utrace_tracer_task(struct task_struct *tsk)
++{
++ BUG();
++ return NULL;
++}
++static inline int utrace_allow_access_process_vm(struct task_struct *tsk)
++{
++ BUG();
++ return 0;
++}
++static inline int utrace_unsafe_exec(struct task_struct *tsk)
++{
++ BUG();
++ return 0;
++}
++static inline void utrace_signal_handler_singlestep(struct task_struct *tsk,
++ struct pt_regs *regs)
++{
++ BUG();
++}
++
++#endif /* CONFIG_UTRACE */
++
++#endif /* linux/utrace.h */
+Index: b/include/linux/sched.h
+===================================================================
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -962,6 +962,11 @@ struct task_struct {
+ struct audit_context *audit_context;
+ seccomp_t seccomp;
+
++#ifdef CONFIG_UTRACE
++ struct utrace *utrace;
++ unsigned long utrace_flags;
++#endif
++
+ /* Thread group tracking */
+ u32 parent_exec_id;
+ u32 self_exec_id;
+Index: b/init/Kconfig
+===================================================================
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -689,3 +689,21 @@ endmenu
+ menu "Block layer"
+ source "block/Kconfig"
+ endmenu
++
++menu "Process debugging support"
++
++config UTRACE
++ bool "Infrastructure for tracing and debugging user processes"
++ default y
++ depends on MODULES
++ help
++ Enable the utrace process tracing interface.
++ This is an internal kernel interface to track events in user
++ threads, extract and change user thread state. This interface
++ is exported to kernel modules, and is also used to implement ptrace.
++ If you disable this, no facilities for debugging user processes
++ will be available, nor the facilities used by UML and other
++ applications. Unless you are making a specially stripped-down
++	  kernel and are very sure you don't need these facilities,
++ say Y.
++endmenu
linux-2.6-utrace-ptrace-compat-avr32.patch:
--- NEW FILE linux-2.6-utrace-ptrace-compat-avr32.patch ---
[PATCH 4d] utrace: avr32 ptrace compatibility
From: Haavard Skinnemoen <hskinnemoen at atmel.com>
Rip out most of the ptrace code for AVR32 and replace it with the much
nicer utrace stuff. It builds in all possible combinations of
CONFIG_UTRACE and CONFIG_PTRACE, and it seems to work as far as I've tested
it with strace and some simple debugging with gdb.
Signed-off-by: Haavard Skinnemoen <hskinnemoen at atmel.com>
Signed-off-by: Roland McGrath <roland at redhat.com>
---
arch/avr32/kernel/ptrace.c | 107 +++++++-------------------------------------
1 files changed, 18 insertions(+), 89 deletions(-)
--- linux-2.6/arch/avr32/kernel/ptrace.c
+++ linux-2.6/arch/avr32/kernel/ptrace.c
@@ -78,115 +78,44 @@ const struct utrace_regset_view *utrace_
}
#endif /* CONFIG_UTRACE */
+#ifdef CONFIG_PTRACE
-#if 0
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+static const struct ptrace_layout_segment avr32_uarea[] = {
+ { 0, ELF_NGREG * sizeof(long), 0, 0 },
+ { 0, 0, -1, 0 },
+};
+
+int arch_ptrace(long *request, struct task_struct *child,
+ struct utrace_attached_engine *engine,
+ unsigned long addr, unsigned long data, long *val)
{
- unsigned long tmp;
- int ret;
-
pr_debug("arch_ptrace(%ld, %d, %#lx, %#lx)\n",
- request, child->pid, addr, data);
+ *request, child->pid, addr, data);
pr_debug("ptrace: Enabling monitor mode...\n");
__mtdr(DBGREG_DC, __mfdr(DBGREG_DC) | DC_MM | DC_DBE);
- switch (request) {
- /* Read the word at location addr in the child process */
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
- ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- if (ret == sizeof(tmp))
- ret = put_user(tmp, (unsigned long __user *)data);
- else
- ret = -EIO;
- break;
-
+ switch (*request) {
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
- break;
-
- /* Write the word in data at location addr */
- case PTRACE_POKETEXT:
- case PTRACE_POKEDATA:
- ret = access_process_vm(child, addr, &data, sizeof(data), 1);
- if (ret == sizeof(data))
- ret = 0;
- else
- ret = -EIO;
- break;
+ return ptrace_peekusr(child, engine, avr32_uarea, addr, data);
case PTRACE_POKEUSR:
- ret = ptrace_write_user(child, addr, data);
- break;
-
- /* continue and stop at next (return from) syscall */
- case PTRACE_SYSCALL:
- /* restart after signal */
- case PTRACE_CONT:
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->exit_code = data;
- /* XXX: Are we sure no breakpoints are active here? */
- wake_up_process(child);
- ret = 0;
- break;
-
- /*
- * Make the child exit. Best I can do is send it a
- * SIGKILL. Perhaps it should be put in the status that it
- * wants to exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE)
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
- break;
-
- /*
- * execute single instruction.
- */
- case PTRACE_SINGLESTEP:
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- ptrace_single_step(child);
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
- break;
-
- /* Detach a process that was attached */
- case PTRACE_DETACH:
- ret = ptrace_detach(child, data);
+ return ptrace_pokeusr(child, engine, avr32_uarea, addr, data);
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ return ptrace_whole_regset(child, engine, data, 0, 0);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (const void __user *)data);
- break;
-
- default:
- ret = ptrace_request(child, request, addr, data);
+ return ptrace_whole_regset(child, engine, data, 0, 1);
break;
}
- pr_debug("sys_ptrace returning %d (DC = 0x%08lx)\n", ret, __mfdr(DBGREG_DC));
- return ret;
+ return -ENOSYS;
}
-#endif
+#endif /* CONFIG_PTRACE */
+#endif /* CONFIG_UTRACE */
asmlinkage void syscall_trace(struct pt_regs *regs, int is_exit)
{
linux-2.6-utrace-ptrace-compat-ia64.patch:
Index: linux-2.6-utrace-ptrace-compat-ia64.patch
===================================================================
RCS file: linux-2.6-utrace-ptrace-compat-ia64.patch
diff -N linux-2.6-utrace-ptrace-compat-ia64.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-ptrace-compat-ia64.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,1106 @@
+[PATCH 4a] utrace: ia64 ptrace compatibility
+
+This patch implements ptrace compatibility for ia64.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy at intel.com>
+Signed-off-by: Bibo mao <bibo.mao at intel.com>
+
+---
+
+ arch/ia64/ia32/sys_ia32.c | 40 ++
+ arch/ia64/kernel/ptrace.c | 1016 +++++----------------------------------------
+ 2 files changed, 159 insertions(+), 897 deletions(-)
+
+--- linux-2.6/arch/ia64/ia32/sys_ia32.c
++++ linux-2.6/arch/ia64/ia32/sys_ia32.c
+@@ -2340,6 +2340,46 @@ const struct utrace_regset_view utrace_i
+ };
+ #endif
+
++#ifdef CONFIG_PTRACE
++/*
++ * This matches the arch/i386/kernel/ptrace.c definitions.
++ */
++
++static const struct ptrace_layout_segment ia32_uarea[] = {
++ {0, sizeof(struct user_regs_struct32), 0, 0},
++ {0, 0, -1, 0}
++};
++
++fastcall int arch_compat_ptrace(compat_long_t *request,
++ struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ compat_ulong_t addr, compat_ulong_t data,
++ compat_long_t *retval)
++{
++ switch (*request) {
++ case PTRACE_PEEKUSR:
++ return ptrace_compat_peekusr(child, engine, ia32_uarea,
++ addr, data);
++ case PTRACE_POKEUSR:
++ return ptrace_compat_pokeusr(child, engine, ia32_uarea,
++ addr, data);
++ case IA32_PTRACE_GETREGS:
++ return ptrace_whole_regset(child, engine, data, 0, 0);
++ case IA32_PTRACE_SETREGS:
++ return ptrace_whole_regset(child, engine, data, 0, 1);
++ case IA32_PTRACE_GETFPREGS:
++ return ptrace_whole_regset(child, engine, data, 1, 0);
++ case IA32_PTRACE_SETFPREGS:
++ return ptrace_whole_regset(child, engine, data, 1, 1);
++ case IA32_PTRACE_GETFPXREGS:
++ return ptrace_whole_regset(child, engine, data, 2, 0);
++ case IA32_PTRACE_SETFPXREGS:
++ return ptrace_whole_regset(child, engine, data, 2, 1);
++ }
++ return -ENOSYS;
++}
++#endif
++
+ typedef struct {
+ unsigned int ss_sp;
+ unsigned int ss_flags;
+--- linux-2.6/arch/ia64/kernel/ptrace.c
++++ linux-2.6/arch/ia64/kernel/ptrace.c
+@@ -554,81 +554,6 @@ ia64_sync_user_rbs (struct task_struct *
+ return 0;
+ }
+
+-#if 0 /* XXX */
+-static inline int
+-thread_matches (struct task_struct *thread, unsigned long addr)
+-{
+- unsigned long thread_rbs_end;
+- struct pt_regs *thread_regs;
+-
+- if (ptrace_check_attach(thread, 0) < 0)
+- /*
+- * If the thread is not in an attachable state, we'll
+- * ignore it. The net effect is that if ADDR happens
+- * to overlap with the portion of the thread's
+- * register backing store that is currently residing
+- * on the thread's kernel stack, then ptrace() may end
+- * up accessing a stale value. But if the thread
+- * isn't stopped, that's a problem anyhow, so we're
+- * doing as well as we can...
+- */
+- return 0;
+-
+- thread_regs = task_pt_regs(thread);
+- thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
+- if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
+- return 0;
+-
+- return 1; /* looks like we've got a winner */
+-}
+-
+-/*
+- * GDB apparently wants to be able to read the register-backing store
+- * of any thread when attached to a given process. If we are peeking
+- * or poking an address that happens to reside in the kernel-backing
+- * store of another thread, we need to attach to that thread, because
+- * otherwise we end up accessing stale data.
+- *
+- * task_list_lock must be read-locked before calling this routine!
+- */
+-static struct task_struct *
+-find_thread_for_addr (struct task_struct *child, unsigned long addr)
+-{
+- struct task_struct *p;
+- struct mm_struct *mm;
+- struct list_head *this, *next;
+- int mm_users;
+-
+- if (!(mm = get_task_mm(child)))
+- return child;
+-
+- /* -1 because of our get_task_mm(): */
+- mm_users = atomic_read(&mm->mm_users) - 1;
+- if (mm_users <= 1)
+- goto out; /* not multi-threaded */
+-
+- /*
+- * Traverse the current process' children list. Every task that
+- * one attaches to becomes a child. And it is only attached children
+- * of the debugger that are of interest (ptrace_check_attach checks
+- * for this).
+- */
+- list_for_each_safe(this, next, ¤t->children) {
+- p = list_entry(this, struct task_struct, sibling);
+- if (p->tgid != child->tgid)
+- continue;
+- if (thread_matches(p, addr)) {
+- child = p;
+- goto out;
+- }
+- }
+-
+- out:
+- mmput(mm);
+- return child;
+-}
+-#endif
+-
+ /*
+ * Write f32-f127 back to task->thread.fph if it has been modified.
+ */
+@@ -792,828 +717,6 @@ access_nat_bits (struct task_struct *chi
+ return 0;
+ }
+
+-#if 0
+-static int
+-access_uarea (struct task_struct *child, unsigned long addr,
+- unsigned long *data, int write_access)
+-{
+- unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm;
+- struct switch_stack *sw;
+- struct pt_regs *pt;
+-# define pt_reg_addr(pt, reg) ((void *) \
+- ((unsigned long) (pt) \
+- + offsetof(struct pt_regs, reg)))
+-
+-
+- pt = task_pt_regs(child);
+- sw = (struct switch_stack *) (child->thread.ksp + 16);
+-
+- if ((addr & 0x7) != 0) {
+- dprintk("ptrace: unaligned register address 0x%lx\n", addr);
+- return -1;
+- }
+-
+- if (addr < PT_F127 + 16) {
+- /* accessing fph */
+- if (write_access)
+- ia64_sync_fph(child);
+- else
+- ia64_flush_fph(child);
+- ptr = (unsigned long *)
+- ((unsigned long) &child->thread.fph + addr);
+- } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
+- /* scratch registers untouched by kernel (saved in pt_regs) */
+- ptr = pt_reg_addr(pt, f10) + (addr - PT_F10);
+- } else if (addr >= PT_F12 && addr < PT_F15 + 16) {
+- /*
+- * Scratch registers untouched by kernel (saved in
+- * switch_stack).
+- */
+- ptr = (unsigned long *) ((long) sw
+- + (addr - PT_NAT_BITS - 32));
+- } else if (addr < PT_AR_LC + 8) {
+- /* preserved state: */
+- struct unw_frame_info info;
+- char nat = 0;
+- int ret;
+-
+- unw_init_from_blocked_task(&info, child);
+- if (unw_unwind_to_user(&info) < 0)
+- return -1;
+-
+- switch (addr) {
+- case PT_NAT_BITS:
+- return access_nat_bits(child, pt, &info,
+- data, write_access);
+-
+- case PT_R4: case PT_R5: case PT_R6: case PT_R7:
+- if (write_access) {
+- /* read NaT bit first: */
+- unsigned long dummy;
+-
+- ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4,
+- &dummy, &nat);
+- if (ret < 0)
+- return ret;
+- }
+- return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data,
+- &nat, write_access);
+-
+- case PT_B1: case PT_B2: case PT_B3:
+- case PT_B4: case PT_B5:
+- return unw_access_br(&info, (addr - PT_B1)/8 + 1, data,
+- write_access);
+-
+- case PT_AR_EC:
+- return unw_access_ar(&info, UNW_AR_EC, data,
+- write_access);
+-
+- case PT_AR_LC:
+- return unw_access_ar(&info, UNW_AR_LC, data,
+- write_access);
+-
+- default:
+- if (addr >= PT_F2 && addr < PT_F5 + 16)
+- return access_fr(&info, (addr - PT_F2)/16 + 2,
+- (addr & 8) != 0, data,
+- write_access);
+- else if (addr >= PT_F16 && addr < PT_F31 + 16)
+- return access_fr(&info,
+- (addr - PT_F16)/16 + 16,
+- (addr & 8) != 0,
+- data, write_access);
+- else {
+- dprintk("ptrace: rejecting access to register "
+- "address 0x%lx\n", addr);
+- return -1;
+- }
+- }
+- } else if (addr < PT_F9+16) {
+- /* scratch state */
+- switch (addr) {
+- case PT_AR_BSP:
+- /*
+- * By convention, we use PT_AR_BSP to refer to
+- * the end of the user-level backing store.
+- * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
+- * to get the real value of ar.bsp at the time
+- * the kernel was entered.
+- *
+- * Furthermore, when changing the contents of
+- * PT_AR_BSP (or PT_CFM) we MUST copy any
+- * users-level stacked registers that are
+- * stored on the kernel stack back to
+- * user-space because otherwise, we might end
+- * up clobbering kernel stacked registers.
+- * Also, if this happens while the task is
+- * blocked in a system call, which convert the
+- * state such that the non-system-call exit
+- * path is used. This ensures that the proper
+- * state will be picked up when resuming
+- * execution. However, it *also* means that
+- * once we write PT_AR_BSP/PT_CFM, it won't be
+- * possible to modify the syscall arguments of
+- * the pending system call any longer. This
+- * shouldn't be an issue because modifying
+- * PT_AR_BSP/PT_CFM generally implies that
+- * we're either abandoning the pending system
+- * call or that we defer it's re-execution
+- * (e.g., due to GDB doing an inferior
+- * function call).
+- */
+- urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
+- if (write_access) {
+- if (*data != urbs_end) {
+- if (ia64_sync_user_rbs(child, sw,
+- pt->ar_bspstore,
+- urbs_end) < 0)
+- return -1;
+- if (in_syscall(pt))
+- convert_to_non_syscall(child,
+- pt,
+- cfm);
+- /*
+- * Simulate user-level write
+- * of ar.bsp:
+- */
+- pt->loadrs = 0;
+- pt->ar_bspstore = *data;
+- }
+- } else
+- *data = urbs_end;
+- return 0;
+-
+- case PT_CFM:
+- urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
+- if (write_access) {
+- if (((cfm ^ *data) & PFM_MASK) != 0) {
+- if (ia64_sync_user_rbs(child, sw,
+- pt->ar_bspstore,
+- urbs_end) < 0)
+- return -1;
+- if (in_syscall(pt))
+- convert_to_non_syscall(child,
+- pt,
+- cfm);
+- pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
+- | (*data & PFM_MASK));
+- }
+- } else
+- *data = cfm;
+- return 0;
+-
+- case PT_CR_IPSR:
+- if (write_access)
+- pt->cr_ipsr = ((*data & IPSR_MASK)
+- | (pt->cr_ipsr & ~IPSR_MASK));
+- else
+- *data = (pt->cr_ipsr & IPSR_MASK);
+- return 0;
+-
+- case PT_AR_RSC:
+- if (write_access)
+- pt->ar_rsc = *data | (3 << 2); /* force PL3 */
+- else
+- *data = pt->ar_rsc;
+- return 0;
+-
+- case PT_AR_RNAT:
+- urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+- rnat_addr = (long) ia64_rse_rnat_addr((long *)
+- urbs_end);
+- if (write_access)
+- return ia64_poke(child, sw, urbs_end,
+- rnat_addr, *data);
+- else
+- return ia64_peek(child, sw, urbs_end,
+- rnat_addr, data);
+-
+- case PT_R1:
+- ptr = pt_reg_addr(pt, r1);
+- break;
+- case PT_R2: case PT_R3:
+- ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
+- break;
+- case PT_R8: case PT_R9: case PT_R10: case PT_R11:
+- ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
+- break;
+- case PT_R12: case PT_R13:
+- ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
+- break;
+- case PT_R14:
+- ptr = pt_reg_addr(pt, r14);
+- break;
+- case PT_R15:
+- ptr = pt_reg_addr(pt, r15);
+- break;
+- case PT_R16: case PT_R17: case PT_R18: case PT_R19:
+- case PT_R20: case PT_R21: case PT_R22: case PT_R23:
+- case PT_R24: case PT_R25: case PT_R26: case PT_R27:
+- case PT_R28: case PT_R29: case PT_R30: case PT_R31:
+- ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
+- break;
+- case PT_B0:
+- ptr = pt_reg_addr(pt, b0);
+- break;
+- case PT_B6:
+- ptr = pt_reg_addr(pt, b6);
+- break;
+- case PT_B7:
+- ptr = pt_reg_addr(pt, b7);
+- break;
+- case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
+- case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
+- ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
+- break;
+- case PT_AR_BSPSTORE:
+- ptr = pt_reg_addr(pt, ar_bspstore);
+- break;
+- case PT_AR_UNAT:
+- ptr = pt_reg_addr(pt, ar_unat);
+- break;
+- case PT_AR_PFS:
+- ptr = pt_reg_addr(pt, ar_pfs);
+- break;
+- case PT_AR_CCV:
+- ptr = pt_reg_addr(pt, ar_ccv);
+- break;
+- case PT_AR_FPSR:
+- ptr = pt_reg_addr(pt, ar_fpsr);
+- break;
+- case PT_CR_IIP:
+- ptr = pt_reg_addr(pt, cr_iip);
+- break;
+- case PT_PR:
+- ptr = pt_reg_addr(pt, pr);
+- break;
+- /* scratch register */
+-
+- default:
+- /* disallow accessing anything else... */
+- dprintk("ptrace: rejecting access to register "
+- "address 0x%lx\n", addr);
+- return -1;
+- }
+- } else if (addr <= PT_AR_SSD) {
+- ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
+- } else {
+- /* access debug registers */
+-
+- if (addr >= PT_IBR) {
+- regnum = (addr - PT_IBR) >> 3;
+- ptr = &child->thread.ibr[0];
+- } else {
+- regnum = (addr - PT_DBR) >> 3;
+- ptr = &child->thread.dbr[0];
+- }
+-
+- if (regnum >= 8) {
+- dprintk("ptrace: rejecting access to register "
+- "address 0x%lx\n", addr);
+- return -1;
+- }
+-#ifdef CONFIG_PERFMON
+- /*
+- * Check if debug registers are used by perfmon. This
+- * test must be done once we know that we can do the
+- * operation, i.e. the arguments are all valid, but
+- * before we start modifying the state.
+- *
+- * Perfmon needs to keep a count of how many processes
+- * are trying to modify the debug registers for system
+- * wide monitoring sessions.
+- *
+- * We also include read access here, because they may
+- * cause the PMU-installed debug register state
+- * (dbr[], ibr[]) to be reset. The two arrays are also
+- * used by perfmon, but we do not use
+- * IA64_THREAD_DBG_VALID. The registers are restored
+- * by the PMU context switch code.
+- */
+- if (pfm_use_debug_registers(child)) return -1;
+-#endif
+-
+- if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
+- child->thread.flags |= IA64_THREAD_DBG_VALID;
+- memset(child->thread.dbr, 0,
+- sizeof(child->thread.dbr));
+- memset(child->thread.ibr, 0,
+- sizeof(child->thread.ibr));
+- }
+-
+- ptr += regnum;
+-
+- if ((regnum & 1) && write_access) {
+- /* don't let the user set kernel-level breakpoints: */
+- *ptr = *data & ~(7UL << 56);
+- return 0;
+- }
+- }
+- if (write_access)
+- *ptr = *data;
+- else
+- *data = *ptr;
+- return 0;
+-}
+-
+-static long
+-ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+-{
+- unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
+- struct unw_frame_info info;
+- struct ia64_fpreg fpval;
+- struct switch_stack *sw;
+- struct pt_regs *pt;
+- long ret, retval = 0;
+- char nat = 0;
+- int i;
+-
+- if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
+- return -EIO;
+-
+- pt = task_pt_regs(child);
+- sw = (struct switch_stack *) (child->thread.ksp + 16);
+- unw_init_from_blocked_task(&info, child);
+- if (unw_unwind_to_user(&info) < 0) {
+- return -EIO;
+- }
+-
+- if (((unsigned long) ppr & 0x7) != 0) {
+- dprintk("ptrace:unaligned register address %p\n", ppr);
+- return -EIO;
+- }
+-
+- if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
+- || access_uarea(child, PT_AR_EC, &ec, 0) < 0
+- || access_uarea(child, PT_AR_LC, &lc, 0) < 0
+- || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
+- || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
+- || access_uarea(child, PT_CFM, &cfm, 0)
+- || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
+- return -EIO;
+-
+- /* control regs */
+-
+- retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
+- retval |= __put_user(psr, &ppr->cr_ipsr);
+-
+- /* app regs */
+-
+- retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
+- retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
+- retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
+- retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
+- retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
+- retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
+-
+- retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
+- retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
+- retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
+- retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
+- retval |= __put_user(cfm, &ppr->cfm);
+-
+- /* gr1-gr3 */
+-
+- retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
+- retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
+-
+- /* gr4-gr7 */
+-
+- for (i = 4; i < 8; i++) {
+- if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
+- return -EIO;
+- retval |= __put_user(val, &ppr->gr[i]);
+- }
+-
+- /* gr8-gr11 */
+-
+- retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
+-
+- /* gr12-gr15 */
+-
+- retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
+- retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
+- retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
+-
+- /* gr16-gr31 */
+-
+- retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
+-
+- /* b0 */
+-
+- retval |= __put_user(pt->b0, &ppr->br[0]);
+-
+- /* b1-b5 */
+-
+- for (i = 1; i < 6; i++) {
+- if (unw_access_br(&info, i, &val, 0) < 0)
+- return -EIO;
+- __put_user(val, &ppr->br[i]);
+- }
+-
+- /* b6-b7 */
+-
+- retval |= __put_user(pt->b6, &ppr->br[6]);
+- retval |= __put_user(pt->b7, &ppr->br[7]);
+-
+- /* fr2-fr5 */
+-
+- for (i = 2; i < 6; i++) {
+- if (unw_get_fr(&info, i, &fpval) < 0)
+- return -EIO;
+- retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
+- }
+-
+- /* fr6-fr11 */
+-
+- retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
+- sizeof(struct ia64_fpreg) * 6);
+-
+- /* fp scratch regs(12-15) */
+-
+- retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
+- sizeof(struct ia64_fpreg) * 4);
+-
+- /* fr16-fr31 */
+-
+- for (i = 16; i < 32; i++) {
+- if (unw_get_fr(&info, i, &fpval) < 0)
+- return -EIO;
+- retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
+- }
+-
+- /* fph */
+-
+- ia64_flush_fph(child);
+- retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
+- sizeof(ppr->fr[32]) * 96);
+-
+- /* preds */
+-
+- retval |= __put_user(pt->pr, &ppr->pr);
+-
+- /* nat bits */
+-
+- retval |= __put_user(nat_bits, &ppr->nat);
+-
+- ret = retval ? -EIO : 0;
+- return ret;
+-}
+-#endif /* ptrace_getregs() */
+-
+-#if 0
+-static long
+-ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+-{
+- unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
+- struct unw_frame_info info;
+- struct switch_stack *sw;
+- struct ia64_fpreg fpval;
+- struct pt_regs *pt;
+- long ret, retval = 0;
+- int i;
+-
+- memset(&fpval, 0, sizeof(fpval));
+-
+- if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
+- return -EIO;
+-
+- pt = task_pt_regs(child);
+- sw = (struct switch_stack *) (child->thread.ksp + 16);
+- unw_init_from_blocked_task(&info, child);
+- if (unw_unwind_to_user(&info) < 0) {
+- return -EIO;
+- }
+-
+- if (((unsigned long) ppr & 0x7) != 0) {
+- dprintk("ptrace:unaligned register address %p\n", ppr);
+- return -EIO;
+- }
+-
+- /* control regs */
+-
+- retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
+- retval |= __get_user(psr, &ppr->cr_ipsr);
+-
+- /* app regs */
+-
+- retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
+- retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
+- retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
+- retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
+- retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
+- retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
+-
+- retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
+- retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
+- retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
+- retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
+- retval |= __get_user(cfm, &ppr->cfm);
+-
+- /* gr1-gr3 */
+-
+- retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
+- retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
+-
+- /* gr4-gr7 */
+-
+- for (i = 4; i < 8; i++) {
+- retval |= __get_user(val, &ppr->gr[i]);
+- /* NaT bit will be set via PT_NAT_BITS: */
+- if (unw_set_gr(&info, i, val, 0) < 0)
+- return -EIO;
+- }
+-
+- /* gr8-gr11 */
+-
+- retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
+-
+- /* gr12-gr15 */
+-
+- retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
+- retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
+- retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
+-
+- /* gr16-gr31 */
+-
+- retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
+-
+- /* b0 */
+-
+- retval |= __get_user(pt->b0, &ppr->br[0]);
+-
+- /* b1-b5 */
+-
+- for (i = 1; i < 6; i++) {
+- retval |= __get_user(val, &ppr->br[i]);
+- unw_set_br(&info, i, val);
+- }
+-
+- /* b6-b7 */
+-
+- retval |= __get_user(pt->b6, &ppr->br[6]);
+- retval |= __get_user(pt->b7, &ppr->br[7]);
+-
+- /* fr2-fr5 */
+-
+- for (i = 2; i < 6; i++) {
+- retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
+- if (unw_set_fr(&info, i, fpval) < 0)
+- return -EIO;
+- }
+-
+- /* fr6-fr11 */
+-
+- retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
+- sizeof(ppr->fr[6]) * 6);
+-
+- /* fp scratch regs(12-15) */
+-
+- retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
+- sizeof(ppr->fr[12]) * 4);
+-
+- /* fr16-fr31 */
+-
+- for (i = 16; i < 32; i++) {
+- retval |= __copy_from_user(&fpval, &ppr->fr[i],
+- sizeof(fpval));
+- if (unw_set_fr(&info, i, fpval) < 0)
+- return -EIO;
+- }
+-
+- /* fph */
+-
+- ia64_sync_fph(child);
+- retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
+- sizeof(ppr->fr[32]) * 96);
+-
+- /* preds */
+-
+- retval |= __get_user(pt->pr, &ppr->pr);
+-
+- /* nat bits */
+-
+- retval |= __get_user(nat_bits, &ppr->nat);
+-
+- retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
+- retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
+- retval |= access_uarea(child, PT_AR_EC, &ec, 1);
+- retval |= access_uarea(child, PT_AR_LC, &lc, 1);
+- retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
+- retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
+- retval |= access_uarea(child, PT_CFM, &cfm, 1);
+- retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
+-
+- ret = retval ? -EIO : 0;
+- return ret;
+-}
+-#endif /* ptrace_setregs() */
+-
+-/*
+- * Called by kernel/ptrace.c when detaching..
+- *
+- * Make sure the single step bit is not set.
+- */
+-void
+-ptrace_disable (struct task_struct *child)
+-{
+- struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
+-
+- /* make sure the single step/taken-branch trap bits are not set: */
+- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+- child_psr->ss = 0;
+- child_psr->tb = 0;
+-}
+-
+-#if 0 /* XXX */
+-asmlinkage long
+-sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
+-{
+- struct pt_regs *pt;
+- unsigned long urbs_end, peek_or_poke;
+- struct task_struct *child;
+- struct switch_stack *sw;
+- long ret;
+-
+- lock_kernel();
+- ret = -EPERM;
+- if (request == PTRACE_TRACEME) {
+- ret = ptrace_traceme();
+- goto out;
+- }
+-
+- peek_or_poke = (request == PTRACE_PEEKTEXT
+- || request == PTRACE_PEEKDATA
+- || request == PTRACE_POKETEXT
+- || request == PTRACE_POKEDATA);
+- ret = -ESRCH;
+- read_lock(&tasklist_lock);
+- {
+- child = find_task_by_pid(pid);
+- if (child) {
+- if (peek_or_poke)
+- child = find_thread_for_addr(child, addr);
+- get_task_struct(child);
+- }
+- }
+- read_unlock(&tasklist_lock);
+- if (!child)
+- goto out;
+- ret = -EPERM;
+- if (pid == 1) /* no messing around with init! */
+- goto out_tsk;
+-
+- if (request == PTRACE_ATTACH) {
+- ret = ptrace_attach(child);
+- goto out_tsk;
+- }
+-
+- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+- if (ret < 0)
+- goto out_tsk;
+-
+- pt = task_pt_regs(child);
+- sw = (struct switch_stack *) (child->thread.ksp + 16);
+-
+- switch (request) {
+- case PTRACE_PEEKTEXT:
+- case PTRACE_PEEKDATA:
+- /* read word at location addr */
+- urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+- ret = ia64_peek(child, sw, urbs_end, addr, &data);
+- if (ret == 0) {
+- ret = data;
+- /* ensure "ret" is not mistaken as an error code: */
+- force_successful_syscall_return();
+- }
+- goto out_tsk;
+-
+- case PTRACE_POKETEXT:
+- case PTRACE_POKEDATA:
+- /* write the word at location addr */
+- urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+- ret = ia64_poke(child, sw, urbs_end, addr, data);
+- goto out_tsk;
+-
+- case PTRACE_PEEKUSR:
+- /* read the word at addr in the USER area */
+- if (access_uarea(child, addr, &data, 0) < 0) {
+- ret = -EIO;
+- goto out_tsk;
+- }
+- ret = data;
+- /* ensure "ret" is not mistaken as an error code */
+- force_successful_syscall_return();
+- goto out_tsk;
+-
+- case PTRACE_POKEUSR:
+- /* write the word at addr in the USER area */
+- if (access_uarea(child, addr, &data, 1) < 0) {
+- ret = -EIO;
+- goto out_tsk;
+- }
+- ret = 0;
+- goto out_tsk;
+-
+- case PTRACE_OLD_GETSIGINFO:
+- /* for backwards-compatibility */
+- ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
+- goto out_tsk;
+-
+- case PTRACE_OLD_SETSIGINFO:
+- /* for backwards-compatibility */
+- ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
+- goto out_tsk;
+-
+- case PTRACE_SYSCALL:
+- /* continue and stop at next (return from) syscall */
+- case PTRACE_CONT:
+- /* restart after signal. */
+- ret = -EIO;
+- if (!valid_signal(data))
+- goto out_tsk;
+- if (request == PTRACE_SYSCALL)
+- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- else
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- child->exit_code = data;
+-
+- /*
+- * Make sure the single step/taken-branch trap bits
+- * are not set:
+- */
+- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+- ia64_psr(pt)->ss = 0;
+- ia64_psr(pt)->tb = 0;
+-
+- wake_up_process(child);
+- ret = 0;
+- goto out_tsk;
+-
+- case PTRACE_KILL:
+- /*
+- * Make the child exit. Best I can do is send it a
+- * sigkill. Perhaps it should be put in the status
+- * that it wants to exit.
+- */
+- if (child->exit_state == EXIT_ZOMBIE)
+- /* already dead */
+- goto out_tsk;
+- child->exit_code = SIGKILL;
+-
+- ptrace_disable(child);
+- wake_up_process(child);
+- ret = 0;
+- goto out_tsk;
+-
+- case PTRACE_SINGLESTEP:
+- /* let child execute for one instruction */
+- case PTRACE_SINGLEBLOCK:
+- ret = -EIO;
+- if (!valid_signal(data))
+- goto out_tsk;
+-
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- set_tsk_thread_flag(child, TIF_SINGLESTEP);
+- if (request == PTRACE_SINGLESTEP) {
+- ia64_psr(pt)->ss = 1;
+- } else {
+- ia64_psr(pt)->tb = 1;
+- }
+- child->exit_code = data;
+-
+- /* give it a chance to run. */
+- wake_up_process(child);
+- ret = 0;
+- goto out_tsk;
+-
+- case PTRACE_DETACH:
+- /* detach a process that was attached. */
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- ret = ptrace_detach(child, data);
+- goto out_tsk;
+-
+- case PTRACE_GETREGS:
+- ret = ptrace_getregs(child,
+- (struct pt_all_user_regs __user *) data);
+- goto out_tsk;
+-
+- case PTRACE_SETREGS:
+- ret = ptrace_setregs(child,
+- (struct pt_all_user_regs __user *) data);
+- goto out_tsk;
+-
+- default:
+- ret = ptrace_request(child, request, addr, data);
+- goto out_tsk;
+- }
+- out_tsk:
+- put_task_struct(child);
+- out:
+- unlock_kernel();
+- return ret;
+-}
+-#endif
+
+ /* "asmlinkage" so the input arguments are preserved... */
+
+@@ -1667,6 +770,9 @@ syscall_trace_leave (long arg0, long arg
+ }
+ }
+
++
++#ifdef CONFIG_UTRACE
++
+ /* Utrace implementation starts here */
+
+ typedef struct utrace_get {
+@@ -2454,3 +1560,119 @@ const struct utrace_regset_view *utrace_
+ #endif
+ return &utrace_ia64_native;
+ }
++#endif /* CONFIG_UTRACE */
++
++
++#ifdef CONFIG_PTRACE
++
++#define WORD(member, num) \
++ offsetof(struct pt_all_user_regs, member), \
++ offsetof(struct pt_all_user_regs, member) + num * sizeof(long)
++static const struct ptrace_layout_segment pt_all_user_regs_layout[] = {
++ {WORD(nat, 1), 0, ELF_NAT_OFFSET},
++ {WORD(cr_iip, 1), 0, ELF_CR_IIP_OFFSET},
++ {WORD(cfm, 1), 0, ELF_CFM_OFFSET},
++ {WORD(cr_ipsr, 1), 0, ELF_CR_IPSR_OFFSET},
++ {WORD(pr, 1), 0, ELF_PR_OFFSET},
++ {WORD(gr[0], 1), -1, -1},
++ {WORD(gr[1], 31), 0, ELF_GR_OFFSET(1)},
++ {WORD(br[0], 8), 0, ELF_BR_OFFSET(0)},
++ {WORD(ar[0], 16), -1, -1},
++ {WORD(ar[PT_AUR_RSC], 4), 0, ELF_AR_RSC_OFFSET},
++ {WORD(ar[PT_AUR_RNAT+1], 12), -1, -1},
++ {WORD(ar[PT_AUR_CCV], 1), 0, ELF_AR_CCV_OFFSET},
++ {WORD(ar[PT_AUR_CCV+1], 3), -1, -1},
++ {WORD(ar[PT_AUR_UNAT], 1), 0, ELF_AR_UNAT_OFFSET},
++ {WORD(ar[PT_AUR_UNAT+1], 3), -1, -1},
++ {WORD(ar[PT_AUR_FPSR], 1), 0, ELF_AR_FPSR_OFFSET},
++ {WORD(ar[PT_AUR_FPSR+1], 23), -1, -1},
++ {WORD(ar[PT_AUR_PFS], 3), 0, ELF_AR_PFS_OFFSET},
++ {WORD(ar[PT_AUR_EC+1], 62), -1, -1},
++ {offsetof(struct pt_all_user_regs, fr[0]),
++ offsetof(struct pt_all_user_regs, fr[2]),
++ -1, -1},
++ {offsetof(struct pt_all_user_regs, fr[2]),
++ offsetof(struct pt_all_user_regs, fr[128]),
++ 1, 2 * sizeof(elf_fpreg_t)},
++ {0, 0, -1, 0}
++};
++#undef WORD
++
++#define NEXT(addr, sum) (addr + sum * sizeof(long))
++static const struct ptrace_layout_segment pt_uarea_layout[] = {
++ {PT_F32, PT_NAT_BITS, 1, ELF_FP_OFFSET(32)},
++ {PT_NAT_BITS, NEXT(PT_NAT_BITS, 1), 0, ELF_NAT_OFFSET},
++ {PT_F2, PT_F10, 1, ELF_FP_OFFSET(2)},
++ {PT_F10, PT_R4, 1, ELF_FP_OFFSET(10)},
++ {PT_R4, PT_B1, 0, ELF_GR_OFFSET(4)},
++ {PT_B1, PT_AR_EC, 0, ELF_BR_OFFSET(1)},
++ {PT_AR_EC, PT_AR_LC, 0, ELF_AR_EC_OFFSET},
++ {PT_AR_LC, NEXT(PT_AR_LC, 1), 0, ELF_AR_LC_OFFSET},
++ {PT_CR_IPSR, PT_CR_IIP, 0, ELF_CR_IPSR_OFFSET},
++ {PT_CR_IIP, PT_AR_UNAT, 0, ELF_CR_IIP_OFFSET},
++ {PT_AR_UNAT, PT_AR_PFS, 0, ELF_AR_UNAT_OFFSET},
++ {PT_AR_PFS, PT_AR_RSC, 0, ELF_AR_PFS_OFFSET},
++ {PT_AR_RSC, PT_AR_RNAT, 0, ELF_AR_RSC_OFFSET},
++ {PT_AR_RNAT, PT_AR_BSPSTORE, 0, ELF_AR_RNAT_OFFSET},
++ {PT_AR_BSPSTORE,PT_PR, 0, ELF_AR_BSPSTORE_OFFSET},
++ {PT_PR, PT_B6, 0, ELF_PR_OFFSET},
++ {PT_B6, PT_AR_BSP, 0, ELF_BR_OFFSET(6)},
++ {PT_AR_BSP, PT_R1, 0, ELF_AR_BSP_OFFSET},
++ {PT_R1, PT_R12, 0, ELF_GR_OFFSET(1)},
++ {PT_R12, PT_R8, 0, ELF_GR_OFFSET(12)},
++ {PT_R8, PT_R16, 0, ELF_GR_OFFSET(8)},
++ {PT_R16, PT_AR_CCV, 0, ELF_GR_OFFSET(16)},
++ {PT_AR_CCV, PT_AR_FPSR, 0, ELF_AR_CCV_OFFSET},
++ {PT_AR_FPSR, PT_B0, 0, ELF_AR_FPSR_OFFSET},
++ {PT_B0, PT_B7, 0, ELF_BR_OFFSET(0)},
++ {PT_B7, PT_F6, 0, ELF_BR_OFFSET(7)},
++ {PT_F6, PT_AR_CSD, 1, ELF_FP_OFFSET(6)},
++ {PT_AR_CSD, NEXT(PT_AR_CSD, 2), 0, ELF_AR_CSD_OFFSET},
++ {PT_DBR, NEXT(PT_DBR, 8), 2, 0},
++ {PT_IBR, NEXT(PT_IBR, 8), 2, 8 * sizeof(long)},
++ {0, 0, -1, 0}
++};
++#undef NEXT
++
++int arch_ptrace(long *request, struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ unsigned long addr, unsigned long data, long *val)
++{
++ int ret = -ENOSYS;
++ switch (*request) {
++ case PTRACE_OLD_GETSIGINFO:
++ *request = PTRACE_GETSIGINFO;
++ break;
++ case PTRACE_OLD_SETSIGINFO:
++ *request = PTRACE_SETSIGINFO;
++ break;
++
++ case PTRACE_PEEKTEXT: /* read word at location addr. */
++ case PTRACE_PEEKDATA:
++ ret = access_process_vm(child, addr, val, sizeof(*val), 0);
++ ret = ret == sizeof(*val) ? 0 : -EIO;
++ break;
++
++ case PTRACE_PEEKUSR:
++ return ptrace_layout_access(child, engine,
++ utrace_native_view(current),
++ pt_uarea_layout,
++ addr, sizeof(long),
++ NULL, val, 0);
++ case PTRACE_POKEUSR:
++ return ptrace_pokeusr(child, engine,
++ pt_uarea_layout, addr, data);
++
++ case PTRACE_GETREGS:
++ case PTRACE_SETREGS:
++ return ptrace_layout_access(child, engine,
++ utrace_native_view(current),
++ pt_all_user_regs_layout,
++ 0, sizeof(struct pt_all_user_regs),
++ (void __user *) data, NULL,
++ *request == PTRACE_SETREGS);
++ }
++ return ret;
++}
++
++#endif /* CONFIG_PTRACE */
linux-2.6-utrace-ptrace-compat-s390.patch:
Index: linux-2.6-utrace-ptrace-compat-s390.patch
===================================================================
RCS file: linux-2.6-utrace-ptrace-compat-s390.patch
diff -N linux-2.6-utrace-ptrace-compat-s390.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-ptrace-compat-s390.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,184 @@
+[PATCH 4c] utrace: s390 ptrace compatibility
+
+This patch implements ptrace compatibility for s390.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: David Wilder <dwilder at us.ibm.com>
+
+---
+
+ arch/s390/kernel/ptrace.c | 151 +++++++++++++++++++++++++++++++++++++
+ arch/s390/kernel/compat_wrapper.S | 2
+ 2 files changed, 152 insertions(+), 1 deletions(-)
+
+--- linux-2.6/arch/s390/kernel/ptrace.c
++++ linux-2.6/arch/s390/kernel/ptrace.c
+@@ -575,6 +575,157 @@ const struct utrace_regset_view *utrace_
+ }
+
+
++#ifdef CONFIG_PTRACE
++static const struct ptrace_layout_segment s390_uarea[] = {
++ {PT_PSWMASK, PT_FPC, 0, 0},
++ {PT_FPC, PT_CR_9, 1, 0},
++ {PT_CR_9, PT_IEEE_IP, 2, 0},
++ {PT_IEEE_IP, sizeof(struct user), -1, -1},
++ {0, 0, -1, 0}
++};
++
++int arch_ptrace(long *request, struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ unsigned long addr, unsigned long data, long *val)
++{
++ ptrace_area parea;
++ unsigned long tmp;
++ int copied;
++
++ switch (*request) {
++ case PTRACE_PEEKUSR:
++#ifdef CONFIG_64BIT
++ /*
++ * Stupid gdb peeks/pokes the access registers in 64 bit with
++ * an alignment of 4. Programmers from hell...
++ */
++ if (addr >= PT_ACR0 && addr < PT_ACR15) {
++ if (addr & 3)
++ return -EIO;
++ tmp = *(unsigned long *)
++ ((char *) child->thread.acrs + addr - PT_ACR0);
++ return put_user(tmp, (unsigned long __user *) data);
++ }
++ else if (addr == PT_ACR15) {
++ /*
++ * Very special case: old & broken 64 bit gdb reading
++ * from acrs[15]. Result is a 64 bit value. Read the
++ * 32 bit acrs[15] value and shift it by 32. Sick...
++ */
++ tmp = ((unsigned long) child->thread.acrs[15]) << 32;
++ return put_user(tmp, (unsigned long __user *) data);
++ }
++#endif
++ return ptrace_peekusr(child, engine, s390_uarea, addr, data);
++ case PTRACE_POKEUSR:
++#ifdef CONFIG_64BIT
++ if (addr >= PT_ACR0 && addr < PT_ACR15) {
++ if (addr & 3)
++ return -EIO;
++ *(unsigned long *) ((char *) child->thread.acrs
++ + addr - PT_ACR0) = data;
++ return 0;
++ }
++ else if (addr == PT_ACR15) {
++ /*
++ * Very special case: old & broken 64 bit gdb writing
++ * to acrs[15] with a 64 bit value. Ignore the lower
++ * half of the value and write the upper 32 bit to
++ * acrs[15]. Sick...
++ */
++ child->thread.acrs[15] = data >> 32;
++ return 0;
++ }
++#endif
++ return ptrace_pokeusr(child, engine, s390_uarea, addr, data);
++
++ case PTRACE_PEEKUSR_AREA:
++ case PTRACE_POKEUSR_AREA:
++ if (copy_from_user(&parea, (ptrace_area __user *) addr,
++ sizeof(parea)))
++ return -EFAULT;
++ if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
++ return -EIO;
++ return ptrace_layout_access(child, engine,
++ utrace_native_view(current),
++ s390_uarea,
++ parea.kernel_addr, parea.len,
++ (void __user *) parea.process_addr,
++ NULL,
++ *request == PTRACE_POKEUSR_AREA);
++
++ case PTRACE_PEEKTEXT:
++ case PTRACE_PEEKDATA:
++ /* Remove high order bit from address (only for 31 bit). */
++ addr &= PSW_ADDR_INSN;
++ /* read word at location addr. */
++ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
++ if (copied != sizeof(tmp))
++ return -EIO;
++ return put_user(tmp, (unsigned long __user *) data);
++
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ /* Remove high order bit from address (only for 31 bit). */
++ addr &= PSW_ADDR_INSN;
++ /* write the word at location addr. */
++ copied = access_process_vm(child, addr, &data, sizeof(data),1);
++ if (copied != sizeof(data))
++ return -EIO;
++ return 0;
++ }
++
++ return -ENOSYS;
++}
++
++#ifdef CONFIG_COMPAT
++static const struct ptrace_layout_segment s390_compat_uarea[] = {
++ {PT_PSWMASK / 2, PT_FPC / 2, 0, 0},
++ {PT_FPC / 2, PT_CR_9 / 2, 1, 0},
++ {PT_CR_9 / 2, PT_IEEE_IP / 2, 2, 0},
++ {PT_IEEE_IP / 2, sizeof(struct user32), -1, -1},
++ {0, 0, -1, 0}
++};
++
++int arch_compat_ptrace(compat_long_t *request,
++ struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ compat_ulong_t addr, compat_ulong_t data,
++ compat_long_t *val)
++{
++ ptrace_area_emu31 parea;
++
++ switch (*request) {
++ case PTRACE_PEEKUSR:
++ return ptrace_compat_peekusr(child, engine, s390_compat_uarea,
++ addr, data);
++ case PTRACE_POKEUSR:
++ return ptrace_compat_pokeusr(child, engine, s390_compat_uarea,
++ addr, data);
++ case PTRACE_PEEKUSR_AREA:
++ case PTRACE_POKEUSR_AREA:
++ if (copy_from_user(&parea, ((ptrace_area_emu31 __user *)
++ (unsigned long) addr),
++ sizeof(parea)))
++ return -EFAULT;
++ if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
++ return -EIO;
++ return ptrace_layout_access(child, engine,
++ utrace_native_view(current),
++ s390_compat_uarea,
++ parea.kernel_addr, parea.len,
++ (void __user *)
++ (unsigned long) parea.process_addr,
++ NULL,
++ *request == PTRACE_POKEUSR_AREA);
++ }
++
++ return -ENOSYS;
++}
++#endif /* CONFIG_COMPAT */
++#endif /* CONFIG_PTRACE */
++
++
+ asmlinkage void
+ syscall_trace(struct pt_regs *regs, int entryexit)
+ {
+--- linux-2.6/arch/s390/kernel/compat_wrapper.S
++++ linux-2.6/arch/s390/kernel/compat_wrapper.S
+@@ -121,7 +121,7 @@ sys32_ptrace_wrapper:
+ lgfr %r3,%r3 # long
+ llgtr %r4,%r4 # long
+ llgfr %r5,%r5 # long
+- jg sys_ptrace # branch to system call
++ jg compat_sys_ptrace # branch to system call
+
+ .globl sys32_alarm_wrapper
+ sys32_alarm_wrapper:
linux-2.6-utrace-ptrace-compat-sparc64.patch:
Index: linux-2.6-utrace-ptrace-compat-sparc64.patch
===================================================================
RCS file: linux-2.6-utrace-ptrace-compat-sparc64.patch
diff -N linux-2.6-utrace-ptrace-compat-sparc64.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-ptrace-compat-sparc64.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,599 @@
+[PATCH 4b] utrace: sparc64 ptrace compatibility
+
+This patch implements ptrace compatibility for sparc64.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+
+---
+
+ arch/sparc64/kernel/ptrace.c | 566 +++++++-----------------------------------
+ 1 files changed, 100 insertions(+), 466 deletions(-)
+
+--- linux-2.6/arch/sparc64/kernel/ptrace.c
++++ linux-2.6/arch/sparc64/kernel/ptrace.c
+@@ -667,484 +667,118 @@ void flush_ptrace_access(struct vm_area_
+ }
+ }
+
+-#if 0 /* XXX */
+-asmlinkage void do_ptrace(struct pt_regs *regs)
++#ifdef CONFIG_PTRACE
++static const struct ptrace_layout_segment sparc64_getregs_layout[] = {
++ { 0, offsetof(struct pt_regs, u_regs[15]), 0, sizeof(long) },
++ { offsetof(struct pt_regs, u_regs[15]),
++ offsetof(struct pt_regs, tstate),
++ -1, 0 },
++ { offsetof(struct pt_regs, tstate), offsetof(struct pt_regs, y),
++ 0, 32 * sizeof(long) },
++ {0, 0, -1, 0}
++};
++
++int arch_ptrace(long *request, struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ unsigned long addr, unsigned long data,
++ long *retval)
+ {
+- int request = regs->u_regs[UREG_I0];
+- pid_t pid = regs->u_regs[UREG_I1];
+- unsigned long addr = regs->u_regs[UREG_I2];
+- unsigned long data = regs->u_regs[UREG_I3];
+- unsigned long addr2 = regs->u_regs[UREG_I4];
+- struct task_struct *child;
+- int ret;
+-
+- if (test_thread_flag(TIF_32BIT)) {
+- addr &= 0xffffffffUL;
+- data &= 0xffffffffUL;
+- addr2 &= 0xffffffffUL;
+- }
+- lock_kernel();
+-#ifdef DEBUG_PTRACE
+- {
+- char *s;
+-
+- if ((request >= 0) && (request <= 24))
+- s = pt_rq [request];
+- else
+- s = "unknown";
+-
+- if (request == PTRACE_POKEDATA && data == 0x91d02001){
+- printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
+- pid, addr, addr2);
+- } else
+- printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
+- s, request, pid, addr, data, addr2);
+- }
+-#endif
+- if (request == PTRACE_TRACEME) {
+- ret = ptrace_traceme();
+- if (ret < 0)
+- pt_error_return(regs, -ret);
+- else
+- pt_succ_return(regs, 0);
+- goto out;
+- }
+-
+- child = ptrace_get_task_struct(pid);
+- if (IS_ERR(child)) {
+- ret = PTR_ERR(child);
+- pt_error_return(regs, -ret);
+- goto out;
+- }
+-
+- if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
+- || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
+- if (ptrace_attach(child)) {
+- pt_error_return(regs, EPERM);
+- goto out_tsk;
+- }
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+-
+- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+- if (ret < 0) {
+- pt_error_return(regs, -ret);
+- goto out_tsk;
+- }
+-
+- if (!(test_thread_flag(TIF_32BIT)) &&
+- ((request == PTRACE_READDATA64) ||
+- (request == PTRACE_WRITEDATA64) ||
+- (request == PTRACE_READTEXT64) ||
+- (request == PTRACE_WRITETEXT64) ||
+- (request == PTRACE_PEEKTEXT64) ||
+- (request == PTRACE_POKETEXT64) ||
+- (request == PTRACE_PEEKDATA64) ||
+- (request == PTRACE_POKEDATA64))) {
+- addr = regs->u_regs[UREG_G2];
+- addr2 = regs->u_regs[UREG_G3];
+- request -= 30; /* wheee... */
+- }
+-
+- switch(request) {
+- case PTRACE_PEEKUSR:
+- if (addr != 0)
+- pt_error_return(regs, EIO);
+- else
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+-
+- case PTRACE_PEEKTEXT: /* read word at location addr. */
+- case PTRACE_PEEKDATA: {
+- unsigned long tmp64;
+- unsigned int tmp32;
+- int res, copied;
+-
+- res = -EIO;
+- if (test_thread_flag(TIF_32BIT)) {
+- copied = access_process_vm(child, addr,
+- &tmp32, sizeof(tmp32), 0);
+- tmp64 = (unsigned long) tmp32;
+- if (copied == sizeof(tmp32))
+- res = 0;
+- } else {
+- copied = access_process_vm(child, addr,
+- &tmp64, sizeof(tmp64), 0);
+- if (copied == sizeof(tmp64))
+- res = 0;
+- }
+- if (res < 0)
+- pt_error_return(regs, -res);
+- else
+- pt_os_succ_return(regs, tmp64, (void __user *) data);
+- goto out_tsk;
+- }
+-
+- case PTRACE_POKETEXT: /* write the word at location addr. */
+- case PTRACE_POKEDATA: {
+- unsigned long tmp64;
+- unsigned int tmp32;
+- int copied, res = -EIO;
+-
+- if (test_thread_flag(TIF_32BIT)) {
+- tmp32 = data;
+- copied = access_process_vm(child, addr,
+- &tmp32, sizeof(tmp32), 1);
+- if (copied == sizeof(tmp32))
+- res = 0;
+- } else {
+- tmp64 = data;
+- copied = access_process_vm(child, addr,
+- &tmp64, sizeof(tmp64), 1);
+- if (copied == sizeof(tmp64))
+- res = 0;
+- }
+- if (res < 0)
+- pt_error_return(regs, -res);
+- else
+- pt_succ_return(regs, res);
+- goto out_tsk;
+- }
+-
+- case PTRACE_GETREGS: {
+- struct pt_regs32 __user *pregs =
+- (struct pt_regs32 __user *) addr;
+- struct pt_regs *cregs = task_pt_regs(child);
+- int rval;
+-
+- if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
+- __put_user(cregs->tpc, (&pregs->pc)) ||
+- __put_user(cregs->tnpc, (&pregs->npc)) ||
+- __put_user(cregs->y, (&pregs->y))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- for (rval = 1; rval < 16; rval++)
+- if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- pt_succ_return(regs, 0);
+-#ifdef DEBUG_PTRACE
+- printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+-#endif
+- goto out_tsk;
+- }
+-
+- case PTRACE_GETREGS64: {
+- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
+- struct pt_regs *cregs = task_pt_regs(child);
+- unsigned long tpc = cregs->tpc;
+- int rval;
+-
+- if ((task_thread_info(child)->flags & _TIF_32BIT) != 0)
+- tpc &= 0xffffffff;
+- if (__put_user(cregs->tstate, (&pregs->tstate)) ||
+- __put_user(tpc, (&pregs->tpc)) ||
+- __put_user(cregs->tnpc, (&pregs->tnpc)) ||
+- __put_user(cregs->y, (&pregs->y))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- for (rval = 1; rval < 16; rval++)
+- if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- pt_succ_return(regs, 0);
+-#ifdef DEBUG_PTRACE
+- printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+-#endif
+- goto out_tsk;
+- }
+-
+- case PTRACE_SETREGS: {
+- struct pt_regs32 __user *pregs =
+- (struct pt_regs32 __user *) addr;
+- struct pt_regs *cregs = task_pt_regs(child);
+- unsigned int psr, pc, npc, y;
+- int i;
+-
+- /* Must be careful, tracing process can only set certain
+- * bits in the psr.
+- */
+- if (__get_user(psr, (&pregs->psr)) ||
+- __get_user(pc, (&pregs->pc)) ||
+- __get_user(npc, (&pregs->npc)) ||
+- __get_user(y, (&pregs->y))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- cregs->tstate &= ~(TSTATE_ICC);
+- cregs->tstate |= psr_to_tstate_icc(psr);
+- if (!((pc | npc) & 3)) {
+- cregs->tpc = pc;
+- cregs->tnpc = npc;
+- }
+- cregs->y = y;
+- for (i = 1; i < 16; i++) {
+- if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- }
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+-
+- case PTRACE_SETREGS64: {
+- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
+- struct pt_regs *cregs = task_pt_regs(child);
+- unsigned long tstate, tpc, tnpc, y;
+- int i;
+-
+- /* Must be careful, tracing process can only set certain
+- * bits in the psr.
+- */
+- if (__get_user(tstate, (&pregs->tstate)) ||
+- __get_user(tpc, (&pregs->tpc)) ||
+- __get_user(tnpc, (&pregs->tnpc)) ||
+- __get_user(y, (&pregs->y))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- if ((task_thread_info(child)->flags & _TIF_32BIT) != 0) {
+- tpc &= 0xffffffff;
+- tnpc &= 0xffffffff;
+- }
+- tstate &= (TSTATE_ICC | TSTATE_XCC);
+- cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
+- cregs->tstate |= tstate;
+- if (!((tpc | tnpc) & 3)) {
+- cregs->tpc = tpc;
+- cregs->tnpc = tnpc;
+- }
+- cregs->y = y;
+- for (i = 1; i < 16; i++) {
+- if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- }
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+-
+- case PTRACE_GETFPREGS: {
+- struct fps {
+- unsigned int regs[32];
+- unsigned int fsr;
+- unsigned int flags;
+- unsigned int extra;
+- unsigned int fpqd;
+- struct fq {
+- unsigned int insnaddr;
+- unsigned int insn;
+- } fpq[16];
+- };
+- struct fps __user *fps = (struct fps __user *) addr;
+- unsigned long *fpregs = task_thread_info(child)->fpregs;
+-
+- if (copy_to_user(&fps->regs[0], fpregs,
+- (32 * sizeof(unsigned int))) ||
+- __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr)) ||
+- __put_user(0, (&fps->fpqd)) ||
+- __put_user(0, (&fps->flags)) ||
+- __put_user(0, (&fps->extra)) ||
+- clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+-
+- case PTRACE_GETFPREGS64: {
+- struct fps {
+- unsigned int regs[64];
+- unsigned long fsr;
+- };
+- struct fps __user *fps = (struct fps __user *) addr;
+- unsigned long *fpregs = task_thread_info(child)->fpregs;
+-
+- if (copy_to_user(&fps->regs[0], fpregs,
+- (64 * sizeof(unsigned int))) ||
+- __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+-
+- case PTRACE_SETFPREGS: {
+- struct fps {
+- unsigned int regs[32];
+- unsigned int fsr;
+- unsigned int flags;
+- unsigned int extra;
+- unsigned int fpqd;
+- struct fq {
+- unsigned int insnaddr;
+- unsigned int insn;
+- } fpq[16];
+- };
+- struct fps __user *fps = (struct fps __user *) addr;
+- unsigned long *fpregs = task_thread_info(child)->fpregs;
+- unsigned fsr;
+-
+- if (copy_from_user(fpregs, &fps->regs[0],
+- (32 * sizeof(unsigned int))) ||
+- __get_user(fsr, (&fps->fsr))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- task_thread_info(child)->xfsr[0] &= 0xffffffff00000000UL;
+- task_thread_info(child)->xfsr[0] |= fsr;
+- if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
+- task_thread_info(child)->gsr[0] = 0;
+- task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+-
+- case PTRACE_SETFPREGS64: {
+- struct fps {
+- unsigned int regs[64];
+- unsigned long fsr;
+- };
+- struct fps __user *fps = (struct fps __user *) addr;
+- unsigned long *fpregs = task_thread_info(child)->fpregs;
+-
+- if (copy_from_user(fpregs, &fps->regs[0],
+- (64 * sizeof(unsigned int))) ||
+- __get_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
+- pt_error_return(regs, EFAULT);
+- goto out_tsk;
+- }
+- if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
+- task_thread_info(child)->gsr[0] = 0;
+- task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
++ void __user *uaddr = (void __user *) addr;
++ struct pt_regs *uregs = uaddr;
++ int err = -ENOSYS;
++
++ switch (*request) {
++ case PTRACE_GETREGS64:
++ err = ptrace_layout_access(child, engine,
++ &utrace_sparc64_native_view,
++ sparc64_getregs_layout,
++ 0, offsetof(struct pt_regs, y),
++ uaddr, NULL, 0);
++ if (!err &&
++ (put_user(task_pt_regs(child)->y, &uregs->y) ||
++ put_user(task_pt_regs(child)->fprs, &uregs->fprs)))
++ err = -EFAULT;
++ break;
+
+- case PTRACE_READTEXT:
+- case PTRACE_READDATA: {
+- int res = ptrace_readdata(child, addr,
+- (char __user *)addr2, data);
+- if (res == data) {
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+- if (res >= 0)
+- res = -EIO;
+- pt_error_return(regs, -res);
+- goto out_tsk;
+- }
++ case PTRACE_SETREGS64:
++ err = ptrace_layout_access(child, engine,
++ &utrace_sparc64_native_view,
++ sparc64_getregs_layout,
++ 0, offsetof(struct pt_regs, y),
++ uaddr, NULL, 1);
++ if (!err &&
++ (get_user(task_pt_regs(child)->y, &uregs->y) ||
++ get_user(task_pt_regs(child)->fprs, &uregs->fprs)))
++ err = -EFAULT;
++ break;
+
+- case PTRACE_WRITETEXT:
+- case PTRACE_WRITEDATA: {
+- int res = ptrace_writedata(child, (char __user *) addr2,
+- addr, data);
+- if (res == data) {
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+- if (res >= 0)
+- res = -EIO;
+- pt_error_return(regs, -res);
+- goto out_tsk;
+- }
+- case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
+- addr = 1;
++ case PTRACE_GETFPREGS64:
++ case PTRACE_SETFPREGS64:
++ err = ptrace_regset_access(child, engine,
++ utrace_native_view(current),
++ 2, 0, 34 * sizeof(long), uaddr,
++ (*request == PTRACE_SETFPREGS64));
++ break;
+
+- case PTRACE_CONT: { /* restart after signal. */
+- if (!valid_signal(data)) {
+- pt_error_return(regs, EIO);
+- goto out_tsk;
+- }
+-
+- if (request == PTRACE_SYSCALL) {
+- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- } else {
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- }
+-
+- child->exit_code = data;
+-#ifdef DEBUG_PTRACE
+- printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
+- child->pid, child->exit_code,
+- task_pt_regs(child)->tpc,
+- task_pt_regs(child)->tnpc);
+-
+-#endif
+- wake_up_process(child);
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
++ case PTRACE_SUNDETACH:
++ *request = PTRACE_DETACH;
++ break;
+
+-/*
+- * make the child exit. Best I can do is send it a sigkill.
+- * perhaps it should be put in the status that it wants to
+- * exit.
+- */
+- case PTRACE_KILL: {
+- if (child->exit_state == EXIT_ZOMBIE) { /* already dead */
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+- child->exit_code = SIGKILL;
+- wake_up_process(child);
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
++ default:
++ break;
++ };
++ return err;
++}
+
+- case PTRACE_SUNDETACH: { /* detach a process that was attached. */
+- int error = ptrace_detach(child, data);
+- if (error) {
+- pt_error_return(regs, EIO);
+- goto out_tsk;
+- }
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
++#ifdef CONFIG_COMPAT
++static const struct ptrace_layout_segment sparc32_getregs_layout[] = {
++ { 0, offsetof(struct pt_regs32, u_regs[0]),
++ 0, GENREG32_PSR * sizeof(u32) },
++ { offsetof(struct pt_regs32, u_regs[0]),
++ offsetof(struct pt_regs32, u_regs[15]),
++ 0, 1 * sizeof(u32) },
++ { offsetof(struct pt_regs32, u_regs[15]), sizeof(struct pt_regs32),
++ -1, 0 },
++ {0, 0, -1, 0}
++};
++
++int arch_compat_ptrace(compat_long_t *request, struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ compat_ulong_t addr, compat_ulong_t data,
++ compat_long_t *retval)
++{
++ void __user *uaddr = (void __user *) (unsigned long) addr;
++ int err = -ENOSYS;
+
+- /* PTRACE_DUMPCORE unsupported... */
++ switch (*request) {
++ case PTRACE_GETREGS:
++ case PTRACE_SETREGS:
++ err = ptrace_layout_access(child, engine,
++ &utrace_sparc32_view,
++ sparc32_getregs_layout,
++ 0, sizeof(struct pt_regs32),
++ uaddr, NULL,
++ (*request ==
++ PTRACE_SETREGS));
++ break;
+
+- case PTRACE_GETEVENTMSG: {
+- int err;
++ case PTRACE_GETFPREGS:
++ case PTRACE_SETFPREGS:
++ err = ptrace_whole_regset(child, engine, addr, 1,
++ (*request == PTRACE_SETFPREGS));
++ break;
+
+- if (test_thread_flag(TIF_32BIT))
+- err = put_user(child->ptrace_message,
+- (unsigned int __user *) data);
+- else
+- err = put_user(child->ptrace_message,
+- (unsigned long __user *) data);
+- if (err)
+- pt_error_return(regs, -err);
+- else
+- pt_succ_return(regs, 0);
++ case PTRACE_SUNDETACH:
++ *request = PTRACE_DETACH;
+ break;
+- }
+
+- default: {
+- int err = ptrace_request(child, request, addr, data);
+- if (err)
+- pt_error_return(regs, -err);
+- else
+- pt_succ_return(regs, 0);
+- goto out_tsk;
+- }
+- }
+-out_tsk:
+- if (child)
+- put_task_struct(child);
+-out:
+- unlock_kernel();
++ default:
++ break;
++ };
++ return err;
+ }
+-#endif
++#endif /* CONFIG_COMPAT */
++#endif /* CONFIG_PTRACE */
+
+ asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
+ {
linux-2.6-utrace-ptrace-compat.patch:
View full diff with command:
/usr/bin/cvs -f diff -kk -u -N -r 1.2 -r 1.3 linux-2.6-utrace-ptrace-compat.patch
Index: linux-2.6-utrace-ptrace-compat.patch
===================================================================
RCS file: linux-2.6-utrace-ptrace-compat.patch
diff -N linux-2.6-utrace-ptrace-compat.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-ptrace-compat.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,3154 @@
+[PATCH 4] utrace: ptrace compatibility
+
+ptrace compatibility on top of <linux/utrace.h> interfaces.
+This attempts to be precisely compatible with existing ptrace behavior.
+It does not extend, improve, or change it.
+
+The ptrace support is made an option, CONFIG_PTRACE. For now, noone will
+want to turn this off except maybe a bizarre embedded configuration. But
+it looks forward to a day when we can punt the ptrace system call completely.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+
+---
+
+ arch/i386/kernel/ptrace.c | 40
+ arch/powerpc/kernel/ptrace.c | 250 ++++
+ arch/powerpc/kernel/signal_32.c | 52 +
+ arch/powerpc/lib/sstep.c | 3
+ arch/x86_64/ia32/ia32entry.S | 2
+ arch/x86_64/ia32/ptrace32.c | 56 -
+ arch/x86_64/kernel/ptrace.c | 46
+ fs/proc/base.c | 40
+ include/asm-x86_64/ptrace-abi.h | 3
+ include/asm-x86_64/tracehook.h | 1
+ include/linux/ptrace.h | 221 +++-
+ include/linux/sched.h | 4
+ init/Kconfig | 15
+ kernel/Makefile | 3
+ kernel/exit.c | 13
+ kernel/fork.c | 2
+ kernel/ptrace.c | 2051 +++++++++++++++++++++++++++++++++++++---
+ kernel/sys_ni.c | 4
+ 18 files changed, 2632 insertions(+), 174 deletions(-)
+
+Index: b/fs/proc/base.c
+===================================================================
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -151,6 +151,46 @@ static int get_nr_threads(struct task_st
+ return count;
+ }
+
++static int __ptrace_may_attach(struct task_struct *task)
++{
++ /* May we inspect the given task?
++ * This check is used both for attaching with ptrace
++ * and for allowing access to sensitive information in /proc.
++ *
++ * ptrace_attach denies several cases that /proc allows
++ * because setting up the necessary parent/child relationship
++ * or halting the specified task is impossible.
++ */
++ int dumpable = 0;
++ /* Don't let security modules deny introspection */
++ if (task == current)
++ return 0;
++ if (((current->uid != task->euid) ||
++ (current->uid != task->suid) ||
++ (current->uid != task->uid) ||
++ (current->gid != task->egid) ||
++ (current->gid != task->sgid) ||
++ (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
++ return -EPERM;
++ smp_rmb();
++ if (task->mm)
++ dumpable = task->mm->dumpable;
++ if (!dumpable && !capable(CAP_SYS_PTRACE))
++ return -EPERM;
++
++ return security_ptrace(current, task);
++}
++
++int ptrace_may_attach(struct task_struct *task)
++{
++ int err;
++ task_lock(task);
++ err = __ptrace_may_attach(task);
++ task_unlock(task);
++ return !err;
++}
++
++
+ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+ {
+ struct task_struct *task = get_proc_task(inode);
+Index: b/arch/i386/kernel/ptrace.c
+===================================================================
+--- a/arch/i386/kernel/ptrace.c
++++ b/arch/i386/kernel/ptrace.c
+@@ -735,6 +735,46 @@ const struct utrace_regset_view *utrace_
+ return &utrace_i386_native;
+ }
+
++#ifdef CONFIG_PTRACE
++static const struct ptrace_layout_segment i386_uarea[] = {
++ {0, FRAME_SIZE*4, 0, 0},
++ {FRAME_SIZE*4, offsetof(struct user, u_debugreg[0]), -1, 0},
++ {offsetof(struct user, u_debugreg[0]),
++ offsetof(struct user, u_debugreg[8]), 4, 0},
++ {0, 0, -1, 0}
++};
++
++int arch_ptrace(long *req, struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ unsigned long addr, unsigned long data, long *val)
++{
++ switch (*req) {
++ case PTRACE_PEEKUSR:
++ return ptrace_peekusr(child, engine, i386_uarea, addr, data);
++ case PTRACE_POKEUSR:
++ return ptrace_pokeusr(child, engine, i386_uarea, addr, data);
++ case PTRACE_GETREGS:
++ return ptrace_whole_regset(child, engine, data, 0, 0);
++ case PTRACE_SETREGS:
++ return ptrace_whole_regset(child, engine, data, 0, 1);
++ case PTRACE_GETFPREGS:
++ return ptrace_whole_regset(child, engine, data, 1, 0);
++ case PTRACE_SETFPREGS:
++ return ptrace_whole_regset(child, engine, data, 1, 1);
++ case PTRACE_GETFPXREGS:
++ return ptrace_whole_regset(child, engine, data, 2, 0);
++ case PTRACE_SETFPXREGS:
++ return ptrace_whole_regset(child, engine, data, 2, 1);
++ case PTRACE_GET_THREAD_AREA:
++ case PTRACE_SET_THREAD_AREA:
++ return ptrace_onereg_access(child, engine,
++ utrace_native_view(current), 3,
++ addr, (void __user *)data,
++ *req == PTRACE_SET_THREAD_AREA);
++ }
++ return -ENOSYS;
++}
++#endif
+
+ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
+ {
+Index: b/arch/x86_64/ia32/ptrace32.c
+===================================================================
+--- a/arch/x86_64/ia32/ptrace32.c
++++ b/arch/x86_64/ia32/ptrace32.c
+@@ -166,11 +166,6 @@ static int getreg32(struct task_struct *
+
+ #undef R32
+
+-asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
+-{
+- return -ENOSYS;
+-}
+-
+ static int
+ ia32_genregs_get(struct task_struct *target,
+ const struct utrace_regset *regset,
+@@ -600,3 +595,54 @@ const struct utrace_regset_view utrace_i
+ .name = "i386", .e_machine = EM_386,
+ .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
+ };
++
++
++#ifdef CONFIG_PTRACE
++/*
++ * This matches the arch/i386/kernel/ptrace.c definitions.
++ */
++
++static const struct ptrace_layout_segment ia32_uarea[] = {
++ {0, sizeof(struct user_regs_struct32), 0, 0},
++ {sizeof(struct user_regs_struct32),
++ offsetof(struct user32, u_debugreg[0]), -1, 0},
++ {offsetof(struct user32, u_debugreg[0]),
++ offsetof(struct user32, u_debugreg[8]), 4, 0},
++ {0, 0, -1, 0}
++};
++
++int arch_compat_ptrace(compat_long_t *req, struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ compat_ulong_t addr, compat_ulong_t data,
++ compat_long_t *val)
++{
++ switch (*req) {
++ case PTRACE_PEEKUSR:
++ return ptrace_compat_peekusr(child, engine, ia32_uarea,
++ addr, data);
++ case PTRACE_POKEUSR:
++ return ptrace_compat_pokeusr(child, engine, ia32_uarea,
++ addr, data);
++ case PTRACE_GETREGS:
++ return ptrace_whole_regset(child, engine, data, 0, 0);
++ case PTRACE_SETREGS:
++ return ptrace_whole_regset(child, engine, data, 0, 1);
++ case PTRACE_GETFPREGS:
++ return ptrace_whole_regset(child, engine, data, 1, 0);
++ case PTRACE_SETFPREGS:
++ return ptrace_whole_regset(child, engine, data, 1, 1);
++ case PTRACE_GETFPXREGS:
[...2761 lines suppressed...]
++/*
++ * Convenience wrapper for doing access to a whole utrace_regset for ptrace.
++ */
++static inline int ptrace_whole_regset(struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ long data, int setno, int write)
++{
++ return ptrace_regset_access(child, engine, utrace_native_view(current),
++ setno, 0, -1, (void __user *)data, write);
++}
++
++/*
++ * Convenience function doing access to a single slot in a utrace_regset.
++ * The regno value gives a slot number plus regset->bias.
++ * The value accessed is regset->size bytes long.
++ */
++extern int ptrace_onereg_access(struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ const struct utrace_regset_view *view,
++ int setno, unsigned long regno,
++ void __user *data, int write);
++
++
++/*
++ * An array of these describes the layout of the virtual struct user
++ * accessed by PEEKUSR/POKEUSR, or the structure used by GETREGS et al.
++ * The array is terminated by an element with .end of zero.
++ * An element describes the range [.start, .end) of struct user offsets,
++ * measured in bytes; it maps to the regset in the view's regsets array
++ * at the index given by .regset, at .offset bytes into that regset's data.
++ * If .regset is -1, then the [.start, .end) range reads as zero
++ * if .offset is zero, and is skipped on read (user's buffer unchanged)
++ * if .offset is -1.
++ */
++struct ptrace_layout_segment {
++ unsigned int start, end, regset, offset;
++};
++
++/*
++ * Convenience function for doing access to a ptrace compatibility layout.
++ * The offset and size are in bytes.
++ */
++extern int ptrace_layout_access(struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ const struct utrace_regset_view *view,
++ const struct ptrace_layout_segment layout[],
++ unsigned long offset, unsigned int size,
++ void __user *data, void *kdata, int write);
++
++
++/* Convenience wrapper for the common PTRACE_PEEKUSR implementation. */
++static inline int ptrace_peekusr(struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ const struct ptrace_layout_segment layout[],
++ unsigned long addr, long data)
++{
++ return ptrace_layout_access(child, engine, utrace_native_view(current),
++ layout, addr, sizeof(long),
++ (unsigned long __user *)data, NULL, 0);
++}
++
++/* Convenience wrapper for the common PTRACE_PEEKUSR implementation. */
++static inline int ptrace_pokeusr(struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ const struct ptrace_layout_segment layout[],
++ unsigned long addr, long data)
++{
++ return ptrace_layout_access(child, engine, utrace_native_view(current),
++ layout, addr, sizeof(long),
++ NULL, &data, 1);
++}
++
++/*
++ * Called in copy_process.
++ */
++static inline void ptrace_init_task(struct task_struct *tsk)
++{
++ INIT_LIST_HEAD(&tsk->ptracees);
++}
++
++/*
++ * Called in do_exit, after setting PF_EXITING, no locks are held.
++ */
++void ptrace_exit(struct task_struct *tsk);
++
++/*
++ * Called in do_wait, with tasklist_lock held for reading.
++ * This reports any ptrace-child that is ready as do_wait would a normal child.
++ * If there are no ptrace children, returns -ECHILD.
++ * If there are some ptrace children but none reporting now, returns 0.
++ * In those cases the tasklist_lock is still held so next_thread(tsk) works.
++ * For any other return value, tasklist_lock is released before return.
++ */
++int ptrace_do_wait(struct task_struct *tsk,
++ pid_t pid, int options, struct siginfo __user *infop,
++ int __user *stat_addr, struct rusage __user *rusagep);
++
++
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++extern int arch_compat_ptrace(compat_long_t *request,
++ struct task_struct *child,
++ struct utrace_attached_engine *engine,
++ compat_ulong_t a, compat_ulong_t d,
++ compat_long_t *retval);
++
++/* Convenience wrapper for the common PTRACE_PEEKUSR implementation. */
++static inline int ptrace_compat_peekusr(
++ struct task_struct *child, struct utrace_attached_engine *engine,
++ const struct ptrace_layout_segment layout[],
++ compat_ulong_t addr, compat_ulong_t data)
++{
++ compat_ulong_t *udata = (compat_ulong_t __user *) (unsigned long) data;
++ return ptrace_layout_access(child, engine, utrace_native_view(current),
++ layout, addr, sizeof(compat_ulong_t),
++ udata, NULL, 0);
++}
++
++/* Convenience wrapper for the common PTRACE_PEEKUSR implementation. */
++static inline int ptrace_compat_pokeusr(
++ struct task_struct *child, struct utrace_attached_engine *engine,
++ const struct ptrace_layout_segment layout[],
++ compat_ulong_t addr, compat_ulong_t data)
++{
++ return ptrace_layout_access(child, engine, utrace_native_view(current),
++ layout, addr, sizeof(compat_ulong_t),
++ NULL, &data, 1);
++}
++#endif /* CONFIG_COMPAT */
++
++#else /* no CONFIG_PTRACE */
++static inline void ptrace_init_task(struct task_struct *tsk) { }
++static inline void ptrace_exit(struct task_struct *tsk) { }
++static inline int ptrace_do_wait(struct task_struct *tsk,
++ pid_t pid, int options,
++ struct siginfo __user *infop,
++ int __user *stat_addr,
++ struct rusage __user *rusagep)
++{
++ return -ECHILD;
++}
++#endif /* CONFIG_PTRACE */
++
++
+ #ifndef force_successful_syscall_return
+ /*
+ * System call handlers that, upon successful completion, need to return a
+Index: b/include/asm-x86_64/tracehook.h
+===================================================================
+--- a/include/asm-x86_64/tracehook.h
++++ b/include/asm-x86_64/tracehook.h
+@@ -15,6 +15,7 @@
+
+ #include <linux/sched.h>
+ #include <asm/ptrace.h>
++#include <asm/proto.h>
+
+ /*
+ * See linux/tracehook.h for the descriptions of what these need to do.
+Index: b/include/asm-x86_64/ptrace-abi.h
+===================================================================
+--- a/include/asm-x86_64/ptrace-abi.h
++++ b/include/asm-x86_64/ptrace-abi.h
+@@ -48,4 +48,7 @@
+
+ #define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
+
++#define PTRACE_SYSEMU 31
++#define PTRACE_SYSEMU_SINGLESTEP 32
++
+ #endif
+Index: b/init/Kconfig
+===================================================================
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -692,10 +692,21 @@ endmenu
+
+ menu "Process debugging support"
+
++config PTRACE
++ bool "Legacy ptrace system call interface"
++ default y
++ select UTRACE
++ depends on PROC_FS
++ help
++ Enable the ptrace system call.
++ This is traditionally used by debuggers like GDB,
++ and is used by UML and some other applications.
++ Unless you are very sure you won't run anything that needs it,
++ say Y.
++
+ config UTRACE
+ bool "Infrastructure for tracing and debugging user processes"
+- default y
+- depends on MODULES
++ default y if MODULES || PTRACE
+ help
+ Enable the utrace process tracing interface.
+ This is an internal kernel interface to track events in user
linux-2.6-utrace-regset-avr32.patch:
--- NEW FILE linux-2.6-utrace-regset-avr32.patch ---
[PATCH 2d] utrace: avr32 regset support
From: Haavard Skinnemoen <hskinnemoen at atmel.com>
Rip out most of the ptrace code for AVR32 and replace it with the much
nicer utrace stuff.
CC: Haavard Skinnemoen <hskinnemoen at atmel.com>
Signed-off-by: Roland McGrath <roland at redhat.com>
---
arch/avr32/kernel/ptrace.c | 108 ++++++++++++++++++--------------------------
1 files changed, 43 insertions(+), 65 deletions(-)
--- linux-2.6/arch/avr32/kernel/ptrace.c
+++ linux-2.6/arch/avr32/kernel/ptrace.c
@@ -14,94 +14,72 @@
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/user.h>
+#include <linux/tracehook.h>
+#include <asm/tracehook.h>
#include <asm/mmu_context.h>
#include <linux/kdebug.h>
+#ifdef CONFIG_UTRACE
+
static struct pt_regs *get_user_regs(struct task_struct *tsk)
{
return (struct pt_regs *)((unsigned long)task_stack_page(tsk) +
THREAD_SIZE - sizeof(struct pt_regs));
}
-#if 0
-/*
- * Read the word at offset "offset" into the task's "struct user". We
- * actually access the pt_regs struct stored on the kernel stack.
- */
-static int ptrace_read_user(struct task_struct *tsk, unsigned long offset,
- unsigned long __user *data)
+static int genregs_get(struct task_struct *target,
+ const struct utrace_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- unsigned long *regs;
- unsigned long value;
-
- pr_debug("ptrace_read_user(%p, %#lx, %p)\n",
- tsk, offset, data);
-
- if (offset & 3 || offset >= sizeof(struct user)) {
- printk("ptrace_read_user: invalid offset 0x%08lx\n", offset);
- return -EIO;
- }
+ struct pt_regs *regs = get_user_regs(target);
- regs = (unsigned long *)get_user_regs(tsk);
-
- value = 0;
- if (offset < sizeof(struct pt_regs))
- value = regs[offset / sizeof(regs[0])];
-
- return put_user(value, data);
+ return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs, 0, -1);
}
-/*
- * Write the word "value" to offset "offset" into the task's "struct
- * user". We actually access the pt_regs struct stored on the kernel
- * stack.
- */
-static int ptrace_write_user(struct task_struct *tsk, unsigned long offset,
- unsigned long value)
+static int genregs_set(struct task_struct *target,
+ const struct utrace_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
{
- unsigned long *regs;
-
- if (offset & 3 || offset >= sizeof(struct user)) {
- printk("ptrace_write_user: invalid offset 0x%08lx\n", offset);
- return -EIO;
- }
-
- if (offset >= sizeof(struct pt_regs))
- return 0;
+ struct pt_regs *regs = get_user_regs(target);
- regs = (unsigned long *)get_user_regs(tsk);
- regs[offset / sizeof(regs[0])] = value;
-
- return 0;
+ return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs, 0, -1);
}
-static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
-{
- struct pt_regs *regs = get_user_regs(tsk);
+static const struct utrace_regset native_regsets[] = {
+ {
+ .n = ELF_NGREG,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+ /*
+ * Other register sets that probably would make sense:
+ * - Coprocessor registers (8 coprocs with 16 registers each)
+ * - TLS stuff
+ */
+};
- return copy_to_user(uregs, regs, sizeof(*regs)) ? -EFAULT : 0;
-}
+static const struct utrace_regset_view utrace_avr32_native_view = {
+ .name = UTS_MACHINE,
+ .e_machine = ELF_ARCH,
+ .regsets = native_regsets,
+ .n = ARRAY_SIZE(native_regsets),
+};
-static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
+const struct utrace_regset_view *utrace_native_view(struct task_struct *tsk)
{
- struct pt_regs newregs;
- int ret;
-
- ret = -EFAULT;
- if (copy_from_user(&newregs, uregs, sizeof(newregs)) == 0) {
- struct pt_regs *regs = get_user_regs(tsk);
-
- ret = -EINVAL;
- if (valid_user_regs(&newregs)) {
- *regs = newregs;
- ret = 0;
- }
- }
-
- return ret;
+ return &utrace_avr32_native_view;
}
+#endif /* CONFIG_UTRACE */
+
+#if 0
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
unsigned long tmp;
linux-2.6-utrace-regset-ia64.patch:
Index: linux-2.6-utrace-regset-ia64.patch
===================================================================
RCS file: linux-2.6-utrace-regset-ia64.patch
diff -N linux-2.6-utrace-regset-ia64.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-regset-ia64.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,1448 @@
+[PATCH 2a] utrace: ia64 regset support
+
+This patch converts the machine-dependent ptrace code into utrace regset
+support for ia64.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+CC: Anil S Keshavamurthy <anil.s.keshavamurthy at intel.com>
+Signed-off-by: Bibo Mao <bibo.mao at intel.com>
+
+---
+
+ arch/ia64/ia32/sys_ia32.c | 472 +++++++++++++++++++++++++
+ arch/ia64/kernel/ptrace.c | 804 ++++++++++++++++++++++++++++++++++++++++++
+ include/asm-ia64/tracehook.h | 7
+ include/asm-ia64/elf.h | 24 +
+ 4 files changed, 1305 insertions(+), 2 deletions(-)
+
+--- linux-2.6/arch/ia64/ia32/sys_ia32.c
++++ linux-2.6/arch/ia64/ia32/sys_ia32.c
+@@ -44,6 +44,7 @@
+ #include <linux/eventpoll.h>
+ #include <linux/personality.h>
+ #include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/stat.h>
+ #include <linux/ipc.h>
+ #include <linux/capability.h>
+@@ -1868,6 +1869,477 @@ sys32_ptrace (int request, pid_t pid, un
+ }
+ #endif
+
++#ifdef CONFIG_UTRACE
++typedef struct utrace_get {
++ void *kbuf;
++ void __user *ubuf;
++} utrace_get_t;
++
++typedef struct utrace_set {
++ const void *kbuf;
++ const void __user *ubuf;
++} utrace_set_t;
++
++typedef struct utrace_getset {
++ struct task_struct *target;
++ const struct utrace_regset *regset;
++ union {
++ utrace_get_t get;
++ utrace_set_t set;
++ } u;
++ unsigned int pos;
++ unsigned int count;
++ int ret;
++} utrace_getset_t;
++
++static void getfpreg(struct task_struct *task, int regno,int *val)
++{
++ switch (regno / sizeof(int)) {
++ case 0: *val = task->thread.fcr & 0xffff; break;
++ case 1: *val = task->thread.fsr & 0xffff; break;
++ case 2: *val = (task->thread.fsr>>16) & 0xffff; break;
++ case 3: *val = task->thread.fir; break;
++ case 4: *val = (task->thread.fir>>32) & 0xffff; break;
++ case 5: *val = task->thread.fdr; break;
++ case 6: *val = (task->thread.fdr >> 32) & 0xffff; break;
++ }
++}
++
++static void setfpreg(struct task_struct *task, int regno, int val)
++{
++ switch (regno / sizeof(int)) {
++ case 0:
++ task->thread.fcr = (task->thread.fcr & (~0x1f3f))
++ | (val & 0x1f3f);
++ break;
++ case 1:
++ task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
++ break;
++ case 2:
++ task->thread.fsr = (task->thread.fsr & (~0xffff0000))
++ | (val << 16);
++ break;
++ case 3:
++ task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
++ break;
++ case 5:
++ task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
++ break;
++ }
++}
++
++static void access_fpreg_ia32(int regno, void *reg,
++ struct pt_regs *pt, struct switch_stack *sw,
++ int tos, int write)
++{
++ void *f;
++
++ if ((regno += tos) >= 8)
++ regno -= 8;
++ if (regno <= 4)
++ f = &pt->f8 + regno;
++ else if (regno <= 7)
++ f = &sw->f12 + (regno - 4);
++ else {
++ printk(" regno must be less than 7 \n");
++ return;
++ }
++
++ if (write)
++ memcpy(f, reg, sizeof(struct _fpreg_ia32));
++ else
++ memcpy(reg, f, sizeof(struct _fpreg_ia32));
++}
++
++static void do_fpregs_get(struct unw_frame_info *info, void *arg)
++{
++ utrace_getset_t *dst = arg;
++ struct task_struct *task = dst->target;
++ struct pt_regs *pt;
++ int start, end, tos;
++ char buf[80];
++
++ if (dst->count == 0 || unw_unwind_to_user(info) < 0)
++ return;
++ if (dst->pos < 7 * sizeof(int)) {
++ end = min((dst->pos + dst->count), (unsigned int)(7 * sizeof(int)));
++ for (start = dst->pos; start < end; start += sizeof(int))
++ getfpreg(task, start,(int *)( buf + start));
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
++ 0, 7 * sizeof(int));
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++ if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
++ pt = task_pt_regs(task);
++ tos = (task->thread.fsr >> 11) & 7;
++ end = min(dst->pos + dst->count,
++ (unsigned int)(sizeof(struct ia32_user_i387_struct)));
++ start = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
++ end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
++ for (; start < end; start++)
++ access_fpreg_ia32(start, (struct _fpreg_ia32 *)buf + start,
++ pt, info->sw, tos, 0);
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf,
++ buf, 7 * sizeof(int),
++ sizeof(struct ia32_user_i387_struct));
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++}
++
++static void do_fpregs_set(struct unw_frame_info *info, void *arg)
++{
++ utrace_getset_t *dst = arg;
++ struct task_struct *task = dst->target;
++ struct pt_regs *pt;
++ char buf[80];
++ int end, start, tos;
++
++ if (dst->count == 0 || unw_unwind_to_user(info) < 0)
++ return;
++
++ if (dst->pos < 7 * sizeof(int)) {
++ start = dst->pos;
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf, buf,
++ 0, 7 * sizeof(int));
++ if (dst->ret)
++ return;
++ for (; start < dst->pos; start += sizeof(int))
++ setfpreg(task, start, *((int*)(buf + start)));
++ if (dst->count == 0)
++ return;
++ }
++ if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
++ start = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf,
++ buf, 7 * sizeof(int),
++ sizeof(struct ia32_user_i387_struct));
++ if (dst->ret)
++ return;
++ pt = task_pt_regs(task);
++ tos = (task->thread.fsr >> 11) & 7;
++ end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
++ for (; start < end; start++)
++ access_fpreg_ia32(start, (struct _fpreg_ia32 *)buf + start,
++ pt, info->sw, tos, 0);
++ if (dst->count == 0)
++ return;
++ }
++}
++
++#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
++static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
++{
++ int min_val;
++
++ min_val = min(end, OFFSET(fop));
++ while (start < min_val) {
++ if (start == OFFSET(cwd))
++ *((short *)buf) = task->thread.fcr & 0xffff;
++ else if (start == OFFSET(swd))
++ *((short *)buf) = task->thread.fsr & 0xffff;
++ else if (start == OFFSET(twd))
++ *((short *)buf) = (task->thread.fsr>>16) & 0xffff;
++ buf += 2;
++ start += 2;
++ }
++ /* skip fop element */
++ if (start == OFFSET(fop)) {
++ start += 2;
++ buf += 2;
++ }
++ while (start < end) {
++ if (start == OFFSET(fip))
++ *((int *)buf) = task->thread.fir;
++ else if (start == OFFSET(fcs))
++ *((int *)buf) = (task->thread.fir>>32) & 0xffff;
++ else if (start == OFFSET(foo))
++ *((int *)buf) = task->thread.fdr;
++ else if (start == OFFSET(fos))
++ *((int *)buf) = (task->thread.fdr>>32) & 0xffff;
++ else if (start == OFFSET(mxcsr))
++ *((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
++ | ((task->thread.fsr>>32) & 0x3f);
++ buf += 4;
++ start += 4;
++ }
++}
++
++static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
++{
++ int min_val, num32;
++ short num;
++ unsigned long num64;
++
++ min_val = min(end, OFFSET(fop));
++ while (start < min_val) {
++ num = *((short *)buf);
++ if (start == OFFSET(cwd)) {
++ task->thread.fcr = (task->thread.fcr & (~0x1f3f))
++ | (num & 0x1f3f);
++ } else if (start == OFFSET(swd)) {
++ task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
++ } else if (start == OFFSET(twd)) {
++ task->thread.fsr = (task->thread.fsr & (~0xffff0000)) | num;
++ }
++ buf += 2;
++ start += 2;
++ }
++ /* skip fop element */
++ if (start == OFFSET(fop)) {
++ start += 2;
++ buf += 2;
++ }
++ while (start < end) {
++ num32 = *((int *)buf);
++ if (start == OFFSET(fip))
++ task->thread.fir = (task->thread.fir & (~0xffffffff))
++ | num32;
++ else if (start == OFFSET(foo))
++ task->thread.fdr = (task->thread.fdr & (~0xffffffff))
++ | num32;
++ else if (start == OFFSET(mxcsr)) {
++ num64 = num32 & 0xff10;
++ task->thread.fcr = (task->thread.fcr & (~0xff1000000000UL))
++ | (num64<<32);
++ num64 = num32 & 0x3f;
++ task->thread.fsr = (task->thread.fsr & (~0x3f00000000UL))
++ | (num64<<32);
++ }
++ buf += 4;
++ start += 4;
++ }
++}
++
++static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
++{
++ utrace_getset_t *dst = arg;
++ struct task_struct *task = dst->target;
++ struct pt_regs *pt;
++ char buf[128];
++ int start, end, tos;
++
++ if (dst->count == 0 || unw_unwind_to_user(info) < 0)
++ return;
++ if (dst->pos < OFFSET(st_space[0])) {
++ end = min(dst->pos + dst->count, (unsigned int)32);
++ getfpxreg(task, dst->pos, end, buf);
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
++ 0, OFFSET(st_space[0]));
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++ if (dst->pos < OFFSET(xmm_space[0])) {
++ pt = task_pt_regs(task);
++ tos = (task->thread.fsr >> 11) & 7;
++ end = min(dst->pos + dst->count,
++ (unsigned int)OFFSET(xmm_space[0]));
++ start = (dst->pos - OFFSET(st_space[0])) / 16;
++ end = (end - OFFSET(st_space[0])) / 16;
++ for (; start < end; start++)
++ access_fpreg_ia32(start, buf + 16 * start, pt,
++ info->sw, tos, 0);
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf,
++ buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++ if (dst->pos < OFFSET(padding[0]))
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf,
++ &info->sw->f16, OFFSET(xmm_space[0]),
++ OFFSET(padding[0]));
++}
++
++static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
++{
++ utrace_getset_t *dst = arg;
++ struct task_struct *task = dst->target;
++ char buf[128];
++ int start, end;
++
++ if (dst->count == 0 || unw_unwind_to_user(info) < 0)
++ return;
++
++ if (dst->pos < OFFSET(st_space[0])) {
++ start = dst->pos;
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf,
++ buf, 0, OFFSET(st_space[0]));
++ if (dst->ret)
++ return;
++ setfpxreg(task, start, dst->pos, buf);
++ if (dst->count == 0)
++ return;
++ }
++ if (dst->pos < OFFSET(xmm_space[0])) {
++ struct pt_regs *pt;
++ int tos;
++ pt = task_pt_regs(task);
++ tos = (task->thread.fsr >> 11) & 7;
++ start = (dst->pos - OFFSET(st_space[0])) / 16;
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf,
++ buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
++ if (dst->ret)
++ return;
++ end = (dst->pos - OFFSET(st_space[0])) / 16;
++ for (; start < end; start++)
++ access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
++ tos, 1);
++ if (dst->count == 0)
++ return;
++ }
++ if (dst->pos < OFFSET(padding[0]))
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf,
++ &info->sw->f16, OFFSET(xmm_space[0]),
++ OFFSET(padding[0]));
++}
++#undef OFFSET
++
++static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
++ struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ utrace_getset_t info = { .target = target, .regset = regset,
++ .pos = pos, .count = count,
++ .u.set = { .kbuf = kbuf, .ubuf = ubuf },
++ .ret = 0 };
++
++ if (target == current)
++ unw_init_running(call, &info);
++ else {
++ struct unw_frame_info ufi;
++ memset(&ufi, 0, sizeof(ufi));
++ unw_init_from_blocked_task(&ufi, target);
++ (*call)(&ufi, &info);
++ }
++
++ return info.ret;
++}
++
++static int ia32_fpregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ return do_regset_call(do_fpregs_get, target, regset, pos, count, kbuf, ubuf);
++}
++
++static int ia32_fpregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ return do_regset_call(do_fpregs_set, target, regset, pos, count, kbuf, ubuf);
++}
++
++static int ia32_fpxregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ return do_regset_call(do_fpxregs_get, target, regset, pos, count, kbuf, ubuf);
++}
++
++static int ia32_fpxregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ return do_regset_call(do_fpxregs_set, target, regset, pos, count, kbuf, ubuf);
++}
++
++static int ia32_genregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ if (kbuf) {
++ u32 *kp = kbuf;
++ while (count > 0) {
++ *kp++ = getreg(target, pos);
++ pos += 4;
++ count -= 4;
++ }
++ } else {
++ u32 __user *up = ubuf;
++ while (count > 0) {
++ if (__put_user(getreg(target, pos), up++))
++ return -EFAULT;
++ pos += 4;
++ count -= 4;
++ }
++ }
++ return 0;
++}
++
++static int ia32_genregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ int ret = 0;
++
++ if (kbuf) {
++ const u32 *kp = kbuf;
++ while (!ret && count > 0) {
++ putreg(target, pos, *kp++);
++ pos += 4;
++ count -= 4;
++ }
++ } else {
++ const u32 __user *up = ubuf;
++ u32 val;
++ while (!ret && count > 0) {
++ ret = __get_user(val, up++);
++ if (!ret)
++ putreg(target, pos, val);
++ pos += 4;
++ count -= 4;
++ }
++ }
++ return ret;
++}
++
++/*
++ * This should match arch/i386/kernel/ptrace.c:native_regsets.
++ * XXX ioperm? vm86?
++ */
++static const struct utrace_regset ia32_regsets[] = {
++ {
++ .n = sizeof(struct user_regs_struct32)/4,
++ .size = 4, .align = 4,
++ .get = ia32_genregs_get, .set = ia32_genregs_set
++ },
++ {
++ .n = sizeof(struct ia32_user_i387_struct) / 4,
++ .size = 4, .align = 4,
++ .get = ia32_fpregs_get, .set = ia32_fpregs_set
++ },
++ {
++ .n = sizeof(struct ia32_user_fxsr_struct) / 4,
++ .size = 4, .align = 4,
++ .get = ia32_fpxregs_get, .set = ia32_fpxregs_set
++ },
++};
++
++const struct utrace_regset_view utrace_ia32_view = {
++ .name = "i386", .e_machine = EM_386,
++ .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
++};
++#endif
++
+ typedef struct {
+ unsigned int ss_sp;
+ unsigned int ss_flags;
+--- linux-2.6/arch/ia64/kernel/ptrace.c
++++ linux-2.6/arch/ia64/kernel/ptrace.c
+@@ -3,6 +3,9 @@
+ *
+ * Copyright (C) 1999-2005 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm at hpl.hp.com>
++ * Copyright (C) 2006 Intel Co
++ * 2006-08-12 - IA64 Native Utrace implementation support added by
++ * Anil S Keshavamurthy <anil.s.keshavamurthy at intel.com>
+ *
+ * Derived from the x86 and Alpha versions.
+ */
+@@ -18,13 +21,16 @@
+ #include <linux/security.h>
+ #include <linux/audit.h>
+ #include <linux/signal.h>
++#include <linux/module.h>
+
++#include <asm/tracehook.h>
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+ #include <asm/ptrace_offsets.h>
+ #include <asm/rse.h>
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
++#include <asm/elf.h>
+ #include <asm/unwind.h>
+ #ifdef CONFIG_PERFMON
+ #include <asm/perfmon.h>
+@@ -548,6 +554,7 @@ ia64_sync_user_rbs (struct task_struct *
+ return 0;
+ }
+
++#if 0 /* XXX */
+ static inline int
+ thread_matches (struct task_struct *thread, unsigned long addr)
+ {
+@@ -620,6 +627,7 @@ find_thread_for_addr (struct task_struct
+ mmput(mm);
+ return child;
+ }
++#endif
+
+ /*
+ * Write f32-f127 back to task->thread.fph if it has been modified.
+@@ -664,6 +672,7 @@ ia64_sync_fph (struct task_struct *task)
+ psr->dfh = 1;
+ }
+
++#if 0
+ static int
+ access_fr (struct unw_frame_info *info, int regnum, int hi,
+ unsigned long *data, int write_access)
+@@ -682,6 +691,7 @@ access_fr (struct unw_frame_info *info,
+ *data = fpval.u.bits[hi];
+ return ret;
+ }
++#endif /* access_fr() */
+
+ /*
+ * Change the machine-state of CHILD such that it will return via the normal
+@@ -782,6 +792,7 @@ access_nat_bits (struct task_struct *chi
+ return 0;
+ }
+
++#if 0
+ static int
+ access_uarea (struct task_struct *child, unsigned long addr,
+ unsigned long *data, int write_access)
+@@ -1248,7 +1259,9 @@ ptrace_getregs (struct task_struct *chil
+ ret = retval ? -EIO : 0;
+ return ret;
+ }
++#endif /* ptrace_getregs() */
+
++#if 0
+ static long
+ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+ {
+@@ -1394,6 +1407,7 @@ ptrace_setregs (struct task_struct *chil
+ ret = retval ? -EIO : 0;
+ return ret;
+ }
++#endif /* ptrace_setregs() */
+
+ /*
+ * Called by kernel/ptrace.c when detaching..
+@@ -1411,6 +1425,7 @@ ptrace_disable (struct task_struct *chil
+ child_psr->tb = 0;
+ }
+
++#if 0 /* XXX */
+ asmlinkage long
+ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
+ {
+@@ -1598,6 +1613,7 @@ sys_ptrace (long request, pid_t pid, uns
+ unlock_kernel();
+ return ret;
+ }
++#endif
+
+ /* "asmlinkage" so the input arguments are preserved... */
+
+@@ -1650,3 +1666,791 @@ syscall_trace_leave (long arg0, long arg
+ tracehook_report_syscall_step(®s);
+ }
+ }
++
++/* Utrace implementation starts here */
++
++typedef struct utrace_get {
++ void *kbuf;
++ void __user *ubuf;
++} utrace_get_t;
++
++typedef struct utrace_set {
++ const void *kbuf;
++ const void __user *ubuf;
++} utrace_set_t;
++
++typedef struct utrace_getset {
++ struct task_struct *target;
++ const struct utrace_regset *regset;
++ union {
++ utrace_get_t get;
++ utrace_set_t set;
++ } u;
++ unsigned int pos;
++ unsigned int count;
++ int ret;
++} utrace_getset_t;
++
++static int
++access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
++ unsigned long addr, unsigned long *data, int write_access)
++{
++ struct pt_regs *pt;
++ unsigned long *ptr = NULL;
++ int ret;
++ char nat=0;
++
++ pt = task_pt_regs(target);
++ switch (addr) {
++ case ELF_GR_OFFSET(1):
++ ptr = &pt->r1;
++ break;
++ case ELF_GR_OFFSET(2):
++ case ELF_GR_OFFSET(3):
++ ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
++ break;
++ case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
++ if (write_access) {
++ /* read NaT bit first: */
++ unsigned long dummy;
++
++ ret = unw_get_gr(info, addr/8, &dummy, &nat);
++ if (ret < 0)
++ return ret;
++ }
++ return unw_access_gr(info, addr/8, data, &nat, write_access);
++ case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
++ ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
++ break;
++ case ELF_GR_OFFSET(12):
++ case ELF_GR_OFFSET(13):
++ ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
++ break;
++ case ELF_GR_OFFSET(14):
++ ptr = &pt->r14;
++ break;
++ case ELF_GR_OFFSET(15):
++ ptr = &pt->r15;
++ }
++ if (write_access)
++ *ptr = *data;
++ else
++ *data = *ptr;
++ return 0;
++}
++
++static int
++access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
++ unsigned long addr, unsigned long *data, int write_access)
++{
++ struct pt_regs *pt;
++ unsigned long *ptr = NULL;
++
++ pt = task_pt_regs(target);
++ switch (addr) {
++ case ELF_BR_OFFSET(0):
++ ptr = &pt->b0;
++ break;
++ case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
++ return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
++ data, write_access);
++ case ELF_BR_OFFSET(6):
++ ptr = &pt->b6;
++ break;
++ case ELF_BR_OFFSET(7):
++ ptr = &pt->b7;
++ }
++ if (write_access)
++ *ptr = *data;
++ else
++ *data = *ptr;
++ return 0;
++}
++
++static int
++access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
++ unsigned long addr, unsigned long *data, int write_access)
++{
++ struct pt_regs *pt;
++ unsigned long cfm, urbs_end, rnat_addr;
++ unsigned long *ptr = NULL;
++
++ pt = task_pt_regs(target);
++ if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
++ switch (addr) {
++ case ELF_AR_RSC_OFFSET:
++ /* force PL3 */
++ if (write_access)
++ pt->ar_rsc = *data | (3 << 2);
++ else
++ *data = pt->ar_rsc;
++ return 0;
++ case ELF_AR_BSP_OFFSET:
++ /*
++ * By convention, we use PT_AR_BSP to refer to
++ * the end of the user-level backing store.
++ * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
++ * to get the real value of ar.bsp at the time
++ * the kernel was entered.
++ *
++ * Furthermore, when changing the contents of
++ * PT_AR_BSP (or PT_CFM) we MUST copy any
++ * user-level stacked registers that are
++ * stored on the kernel stack back to
++ * user-space because otherwise, we might end
++ * up clobbering kernel stacked registers.
++ * Also, if this happens while the task is
++ * blocked in a system call, in which case we
++ * convert the state such that the non-system-call
++ * exit path is used. This ensures that the proper
++ * state will be picked up when resuming
++ * execution. However, it *also* means that
++ * once we write PT_AR_BSP/PT_CFM, it won't be
++ * possible to modify the syscall arguments of
++ * the pending system call any longer. This
++ * shouldn't be an issue because modifying
++ * PT_AR_BSP/PT_CFM generally implies that
++ * we're either abandoning the pending system
++ * call or that we defer its re-execution
++ * (e.g., due to GDB doing an inferior
++ * function call).
++ */
++ urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
++ if (write_access) {
++ if (*data != urbs_end) {
++ if (ia64_sync_user_rbs(target, info->sw,
++ pt->ar_bspstore,
++ urbs_end) < 0)
++ return -1;
++ if (in_syscall(pt))
++ convert_to_non_syscall(target,
++ pt,
++ cfm);
++ /*
++ * Simulate user-level write
++ * of ar.bsp:
++ */
++ pt->loadrs = 0;
++ pt->ar_bspstore = *data;
++ }
++ } else
++ *data = urbs_end;
++ return 0;
++ case ELF_AR_BSPSTORE_OFFSET: // ar_bsp_store
++ ptr = &pt->ar_bspstore;
++ break;
++ case ELF_AR_RNAT_OFFSET: // ar_rnat
++ urbs_end = ia64_get_user_rbs_end(target, pt, NULL);
++ rnat_addr = (long) ia64_rse_rnat_addr((long *)
++ urbs_end);
++ if (write_access)
++ return ia64_poke(target, info->sw, urbs_end,
++ rnat_addr, *data);
++ else
++ return ia64_peek(target, info->sw, urbs_end,
++ rnat_addr, data);
++ case ELF_AR_CCV_OFFSET: // ar_ccv
++ ptr = &pt->ar_ccv;
++ break;
++ case ELF_AR_UNAT_OFFSET: // ar_unat
++ ptr = &pt->ar_unat;
++ break;
++ case ELF_AR_FPSR_OFFSET: // ar_fpsr
++ ptr = &pt->ar_fpsr;
++ break;
++ case ELF_AR_PFS_OFFSET: // ar_pfs
++ ptr = &pt->ar_pfs;
++ break;
++ case ELF_AR_LC_OFFSET: // ar_lc
++ return unw_access_ar(info, UNW_AR_LC, data,
++ write_access);
++ case ELF_AR_EC_OFFSET: // ar_ec
++ return unw_access_ar(info, UNW_AR_EC, data,
++ write_access);
++ case ELF_AR_CSD_OFFSET: // ar_csd
++ ptr = &pt->ar_csd;
++ break;
++ case ELF_AR_SSD_OFFSET: // ar_ssd
++ ptr = &pt->ar_ssd;
++ }
++ } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
++ switch (addr) {
++ case ELF_CR_IIP_OFFSET:
++ ptr = &pt->cr_iip;
++ break;
++ case ELF_CFM_OFFSET:
++ urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
++ if (write_access) {
++ if (((cfm ^ *data) & PFM_MASK) != 0) {
++ if (ia64_sync_user_rbs(target, info->sw,
++ pt->ar_bspstore,
++ urbs_end) < 0)
++ return -1;
++ if (in_syscall(pt))
++ convert_to_non_syscall(target,
++ pt,
++ cfm);
++ pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
++ | (*data & PFM_MASK));
++ }
++ } else
++ *data = cfm;
++ return 0;
++ case ELF_CR_IPSR_OFFSET:
++ if (write_access)
++ pt->cr_ipsr = ((*data & IPSR_MASK)
++ | (pt->cr_ipsr & ~IPSR_MASK));
++ else
++ *data = (pt->cr_ipsr & IPSR_MASK);
++ return 0;
++ }
++ } else if (addr == ELF_NAT_OFFSET)
++ return access_nat_bits(target, pt, info,
++ data, write_access);
++ else if (addr == ELF_PR_OFFSET)
++ ptr = &pt->pr;
++ else
++ return -1;
++
++ if (write_access)
++ *ptr = *data;
++ else
++ *data = *ptr;
++
++ return 0;
++}
++
++static int
++access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
++ unsigned long addr, unsigned long *data, int write_access)
++{
++ if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
++ return access_elf_gpreg(target, info, addr, data, write_access);
++ else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
++ return access_elf_breg(target, info, addr, data, write_access);
++ else
++ return access_elf_areg(target, info, addr, data, write_access);
++}
++
++void do_gpregs_get(struct unw_frame_info *info, void *arg)
++{
++ struct pt_regs *pt;
++ utrace_getset_t *dst = arg;
++ elf_greg_t tmp[16];
++ unsigned int i, index, min_copy;
++
++ if (unw_unwind_to_user(info) < 0)
++ return;
++
++ /*
++ * coredump format:
++ * r0-r31
++ * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
++ * predicate registers (p0-p63)
++ * b0-b7
++ * ip cfm user-mask
++ * ar.rsc ar.bsp ar.bspstore ar.rnat
++ * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
++ */
++
++
++ /* Skip r0 */
++ if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
++ dst->ret = utrace_regset_copyout_zero(&dst->pos, &dst->count,
++ &dst->u.get.kbuf,
++ &dst->u.get.ubuf,
++ 0, ELF_GR_OFFSET(1));
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++
++ /* gr1 - gr15 */
++ if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
++ index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
++ min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
++ (dst->pos + dst->count) : ELF_GR_OFFSET(16);
++ for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
++ if (access_elf_reg(dst->target, info, i,
++ &tmp[index], 0) < 0) {
++ dst->ret = -EIO;
++ return;
++ }
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
++ ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++
++ /* r16-r31 */
++ if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
++ pt = task_pt_regs(dst->target);
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
++ ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++
++ /* nat, pr, b0 - b7 */
++ if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
++ index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
++ min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
++ (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
++ for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
++ if (access_elf_reg(dst->target, info, i,
++ &tmp[index], 0) < 0) {
++ dst->ret = -EIO;
++ return;
++ }
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
++ ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++
++ /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
++ * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
++ */
++ if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
++ index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
++ min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
++ (dst->pos + dst->count) : ELF_AR_END_OFFSET;
++ for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
++ if (access_elf_reg(dst->target, info, i,
++ &tmp[index], 0) < 0) {
++ dst->ret = -EIO;
++ return;
++ }
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
++ ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
++ }
++}
++
++void do_gpregs_set(struct unw_frame_info *info, void *arg)
++{
++ struct pt_regs *pt;
++ utrace_getset_t *dst = arg;
++ elf_greg_t tmp[16];
++ unsigned int i, index;
++
++ if (unw_unwind_to_user(info) < 0)
++ return;
++
++ /* Skip r0 */
++ if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
++ dst->ret = utrace_regset_copyin_ignore(&dst->pos, &dst->count,
++ &dst->u.set.kbuf,
++ &dst->u.set.ubuf,
++ 0, ELF_GR_OFFSET(1));
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++
++ /* gr1-gr15 */
++ if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
++ i = dst->pos;
++ index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
++ ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
++ if (dst->ret)
++ return;
++ for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
++ if (access_elf_reg(dst->target, info, i,
++ &tmp[index], 1) < 0) {
++ dst->ret = -EIO;
++ return;
++ }
++ if (dst->count == 0)
++ return;
++ }
++
++ /* gr16-gr31 */
++ if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
++ pt = task_pt_regs(dst->target);
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
++ ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++
++ /* nat, pr, b0 - b7 */
++ if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
++ i = dst->pos;
++ index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
++ ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
++ if (dst->ret)
++ return;
++ for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
++ if (access_elf_reg(dst->target, info, i,
++ &tmp[index], 1) < 0) {
++ dst->ret = -EIO;
++ return;
++ }
++ if (dst->count == 0)
++ return;
++ }
++
++ /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
++ * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
++ */
++ if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
++ i = dst->pos;
++ index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
++ ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
++ if (dst->ret)
++ return;
++ for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
++ if (access_elf_reg(dst->target, info, i,
++ &tmp[index], 1) < 0) {
++ dst->ret = -EIO;
++ return;
++ }
++ }
++}
++
++#define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t))
++
++void do_fpregs_get(struct unw_frame_info *info, void *arg)
++{
++ utrace_getset_t *dst = arg;
++ struct task_struct *task = dst->target;
++ elf_fpreg_t tmp[30];
++ int index, min_copy, i;
++
++ if (unw_unwind_to_user(info) < 0)
++ return;
++
++ /* Skip pos 0 and 1 */
++ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
++ dst->ret = utrace_regset_copyout_zero(&dst->pos, &dst->count,
++ &dst->u.get.kbuf,
++ &dst->u.get.ubuf,
++ 0, ELF_FP_OFFSET(2));
++ if (dst->count == 0 || dst->ret)
++ return;
++ }
++
++ /* fr2-fr31 */
++ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
++ index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
++ min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
++ dst->pos + dst->count);
++ for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t), index++)
++ if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
++ &tmp[index])) {
++ dst->ret = -EIO;
++ return;
++ }
++ dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
++ ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
++ if (dst->count == 0 || dst->ret)
++ return;
++ }
++
++ /* fph */
++ if (dst->count > 0) {
++ ia64_flush_fph(dst->target);
++ if (task->thread.flags & IA64_THREAD_FPH_VALID)
++ dst->ret = utrace_regset_copyout(
++ &dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf,
++ &dst->target->thread.fph,
++ ELF_FP_OFFSET(32), -1);
++ else
++ /* Zero fill instead. */
++ dst->ret = utrace_regset_copyout_zero(
++ &dst->pos, &dst->count,
++ &dst->u.get.kbuf, &dst->u.get.ubuf,
++ ELF_FP_OFFSET(32), -1);
++ }
++}
++
++void do_fpregs_set(struct unw_frame_info *info, void *arg)
++{
++ utrace_getset_t *dst = arg;
++ elf_fpreg_t fpreg, tmp[30];
++ int index, start, end;
++
++ if (unw_unwind_to_user(info) < 0)
++ return;
++
++ /* Skip pos 0 and 1 */
++ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
++ dst->ret = utrace_regset_copyin_ignore(&dst->pos, &dst->count,
++ &dst->u.set.kbuf,
++ &dst->u.set.ubuf,
++ 0, ELF_FP_OFFSET(2));
++ if (dst->count == 0 || dst->ret)
++ return;
++ }
++
++ /* fr2-fr31 */
++ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
++ start = dst->pos;
++ end = min(((unsigned int)ELF_FP_OFFSET(32)),
++ dst->pos + dst->count);
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
++ ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
++ if (dst->ret)
++ return;
++
++ if (start & 0xF) { // only write high part
++ if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
++ &fpreg)) {
++ dst->ret = -EIO;
++ return;
++ }
++ tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
++ = fpreg.u.bits[0];
++ start &= ~0xFUL;
++ }
++ if (end & 0xF) { // only write low part
++ if (unw_get_fr(info, end / sizeof(elf_fpreg_t), &fpreg)) {
++ dst->ret = -EIO;
++ return;
++ }
++ tmp[end / sizeof(elf_fpreg_t) -2].u.bits[1]
++ = fpreg.u.bits[1];
++ end = (end + 0xF) & ~0xFUL;
++ }
++
++ for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
++ index = start / sizeof(elf_fpreg_t);
++ if (unw_set_fr(info, index, tmp[index - 2])){
++ dst->ret = -EIO;
++ return;
++ }
++ }
++ if (dst->ret || dst->count == 0)
++ return;
++ }
++
++ /* fph */
++ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
++ ia64_sync_fph(dst->target);
++ dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
++ &dst->u.set.kbuf,
++ &dst->u.set.ubuf,
++ &dst->target->thread.fph,
++ ELF_FP_OFFSET(32), -1);
++ }
++}
++
++static int
++do_regset_call(void (*call)(struct unw_frame_info *, void *),
++ struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ utrace_getset_t info = { .target = target, .regset = regset,
++ .pos = pos, .count = count,
++ .u.set = { .kbuf = kbuf, .ubuf = ubuf },
++ .ret = 0 };
++
++ if (target == current)
++ unw_init_running(call, &info);
++ else {
++ struct unw_frame_info ufi;
++ memset(&ufi, 0, sizeof(ufi));
++ unw_init_from_blocked_task(&ufi, target);
++ (*call)(&ufi, &info);
++ }
++
++ return info.ret;
++}
++
++static int
++gpregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ return do_regset_call(do_gpregs_get, target, regset, pos, count, kbuf, ubuf);
++}
++
++static int gpregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ return do_regset_call(do_gpregs_set, target, regset, pos, count, kbuf, ubuf);
++}
++
++static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
++{
++ struct pt_regs *pt;
++ utrace_getset_t *dst = arg;
++ unsigned long urbs_end;
++
++ if (unw_unwind_to_user(info) < 0)
++ return;
++ pt = task_pt_regs(dst->target);
++ urbs_end = ia64_get_user_rbs_end(dst->target, pt, NULL);
++ dst->ret = ia64_sync_user_rbs(dst->target, info->sw, pt->ar_bspstore, urbs_end);
++}
++/*
++ * This is called to write back the register backing store.
++ * ptrace does this before it stops, so that a tracer reading the user
++ * memory after the thread stops will get the current register data.
++ */
++static int
++gpregs_writeback(struct task_struct *target,
++ const struct utrace_regset *regset,
++ int now)
++{
++ return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, NULL, NULL);
++}
++
++static int
++fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
++{
++ return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
++}
++
++static int fpregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ return do_regset_call(do_fpregs_get, target, regset, pos, count, kbuf, ubuf);
++}
++
++static int fpregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ return do_regset_call(do_fpregs_set, target, regset, pos, count, kbuf, ubuf);
++}
++
++static int dbregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ int ret;
++
++#ifdef CONFIG_PERFMON
++ /*
++ * Check if debug registers are used by perfmon. This
++ * test must be done once we know that we can do the
++ * operation, i.e. the arguments are all valid, but
++ * before we start modifying the state.
++ *
++ * Perfmon needs to keep a count of how many processes
++ * are trying to modify the debug registers for system
++ * wide monitoring sessions.
++ *
++ * We also include read access here, because they may
++ * cause the PMU-installed debug register state
++ * (dbr[], ibr[]) to be reset. The two arrays are also
++ * used by perfmon, but we do not use
++ * IA64_THREAD_DBG_VALID. The registers are restored
++ * by the PMU context switch code.
++ */
++ if (pfm_use_debug_registers(target))
++ return -EIO;
++#endif
++
++ if (!(target->thread.flags & IA64_THREAD_DBG_VALID))
++ ret = utrace_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
++ 0, -1);
++ else {
++ preempt_disable();
++ if (target == current)
++ ia64_load_debug_regs(&target->thread.dbr[0]);
++ preempt_enable_no_resched();
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &target->thread.dbr, 0, -1);
++ }
++
++ return ret;
++}
++
++static int dbregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ int i, ret;
++
++#ifdef CONFIG_PERFMON
++ if (pfm_use_debug_registers(target))
++ return -EIO;
++#endif
++
++ ret = 0;
++ if (!(target->thread.flags & IA64_THREAD_DBG_VALID)){
++ target->thread.flags |= IA64_THREAD_DBG_VALID;
++ memset(target->thread.dbr, 0, 2 * sizeof(target->thread.dbr));
++ } else if (target == current){
++ preempt_disable();
++ ia64_save_debug_regs(&target->thread.dbr[0]);
++ preempt_enable_no_resched();
++ }
++
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &target->thread.dbr, 0, -1);
++
++ for (i = 1; i < IA64_NUM_DBG_REGS; i += 2) {
++ target->thread.dbr[i] &= ~(7UL << 56);
++ target->thread.ibr[i] &= ~(7UL << 56);
++ }
++
++ if (ret)
++ return ret;
++
++ if (target == current){
++ preempt_disable();
++ ia64_load_debug_regs(&target->thread.dbr[0]);
++ preempt_enable_no_resched();
++ }
++ return 0;
++}
++
++static const struct utrace_regset native_regsets[] = {
++ {
++ .n = ELF_NGREG,
++ .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
++ .get = gpregs_get, .set = gpregs_set,
++ .writeback = gpregs_writeback
++ },
++ {
++ .n = ELF_NFPREG,
++ .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
++ .get = fpregs_get, .set = fpregs_set, .active = fpregs_active
++ },
++ {
++ .n = 2 * IA64_NUM_DBG_REGS, .size = sizeof(long),
++ .align = sizeof(long),
++ .get = dbregs_get, .set = dbregs_set
++ }
++};
++
++static const struct utrace_regset_view utrace_ia64_native = {
++ .name = "ia64",
++ .e_machine = EM_IA_64,
++ .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
++};
++
++const struct utrace_regset_view *utrace_native_view(struct task_struct *tsk)
++{
++#ifdef CONFIG_IA32_SUPPORT
++ extern const struct utrace_regset_view utrace_ia32_view;
++ if (IS_IA32_PROCESS(task_pt_regs(tsk)))
++ return &utrace_ia32_view;
++#endif
++ return &utrace_ia64_native;
++}
+--- linux-2.6/include/asm-ia64/tracehook.h
++++ linux-2.6/include/asm-ia64/tracehook.h
+@@ -67,7 +67,10 @@ static inline int tracehook_single_step_
+
+ static inline void tracehook_abort_syscall(struct pt_regs *regs)
+ {
+- regs->r15 = -1L;
++ if (IS_IA32_PROCESS(regs))
++ regs->r1 = -1UL;
++ else
++ regs->r15 = -1UL;
+ }
+
+-#endif
++#endif /* asm/tracehook.h */
+--- linux-2.6/include/asm-ia64/elf.h
++++ linux-2.6/include/asm-ia64/elf.h
+@@ -154,6 +154,30 @@ extern void ia64_init_addr_space (void);
+ #define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
+ #define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
+
++/* elf_gregset_t register offsets */
++#define ELF_GR_0_OFFSET 0
++#define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t))
++#define ELF_PR_OFFSET (33 * sizeof(elf_greg_t))
++#define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t))
++#define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t))
++#define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t))
++#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
++#define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
++#define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
++#define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t))
++#define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t))
++#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
++#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
++#define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t))
++#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
++#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
++#define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t))
++#define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t))
++#define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t))
++#define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t))
++#define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t))
++#define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t))
++
+ typedef unsigned long elf_fpxregset_t;
+
+ typedef unsigned long elf_greg_t;
linux-2.6-utrace-regset-s390.patch:
Index: linux-2.6-utrace-regset-s390.patch
===================================================================
RCS file: linux-2.6-utrace-regset-s390.patch
diff -N linux-2.6-utrace-regset-s390.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-regset-s390.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,1067 @@
+[PATCH 2c] utrace: s390 regset support
+
+This patch converts the machine-dependent ptrace code into utrace regset
+support for s390.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+CC: David Wilder <dwilder at us.ibm.com>
+
+---
+
+ arch/s390/kernel/Makefile | 2
+ arch/s390/kernel/ptrace.c | 952 ++++++++++++++++++---------------------------
+ 2 files changed, 390 insertions(+), 564 deletions(-)
+
+--- linux-2.6/arch/s390/kernel/Makefile
++++ linux-2.6/arch/s390/kernel/Makefile
+@@ -35,3 +35,5 @@ obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS
+ # This is just to get the dependencies...
+ #
+ binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
++
++CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+--- linux-2.6/arch/s390/kernel/ptrace.c
++++ linux-2.6/arch/s390/kernel/ptrace.c
+@@ -30,6 +30,7 @@
+ #include <linux/errno.h>
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
++#include <linux/module.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
+ #include <linux/audit.h>
+@@ -42,6 +43,7 @@
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
++#include <asm/elf.h>
+
+ #ifdef CONFIG_COMPAT
+ #include "compat_ptrace.h"
+@@ -116,640 +118,462 @@ tracehook_single_step_enabled(struct tas
+ return task->thread.per_info.single_step;
+ }
+
+-/*
+- * Called by kernel/ptrace.c when detaching..
+- *
+- * Make sure single step bits etc are not set.
+- */
+-void
+-ptrace_disable(struct task_struct *child)
+-{
+- /* make sure the single step bit is not set. */
+- tracehook_disable_single_step(child);
+-}
+-
+-#ifndef CONFIG_64BIT
+-# define __ADDR_MASK 3
+-#else
+-# define __ADDR_MASK 7
+-#endif
+
+-/*
+- * Read the word at offset addr from the user area of a process. The
+- * trouble here is that the information is littered over different
+- * locations. The process registers are found on the kernel stack,
+- * the floating point stuff and the trace settings are stored in
+- * the task structure. In addition the different structures in
+- * struct user contain pad bytes that should be read as zeroes.
+- * Lovely...
+- */
+ static int
+-peek_user(struct task_struct *child, addr_t addr, addr_t data)
++genregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
+ {
+- struct user *dummy = NULL;
+- addr_t offset, tmp, mask;
++ struct pt_regs *regs = task_pt_regs(target);
++ unsigned long pswmask;
++ int ret;
+
+- /*
+- * Stupid gdb peeks/pokes the access registers in 64 bit with
+- * an alignment of 4. Programmers from hell...
+- */
+- mask = __ADDR_MASK;
+-#ifdef CONFIG_64BIT
+- if (addr >= (addr_t) &dummy->regs.acrs &&
+- addr < (addr_t) &dummy->regs.orig_gpr2)
+- mask = 3;
+-#endif
+- if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
+- return -EIO;
++ /* Remove per bit from user psw. */
++ pswmask = regs->psw.mask & ~PSW_MASK_PER;
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &pswmask, PT_PSWMASK, PT_PSWADDR);
++
++ /* The rest of the PSW and the GPRs are directly on the stack. */
++ if (ret == 0)
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ ®s->psw.addr, PT_PSWADDR,
++ PT_ACR0);
++
++ /* The ACRs are kept in the thread_struct. */
++ if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
++ if (target == current)
++ save_access_regs(target->thread.acrs);
++
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ target->thread.acrs,
++ PT_ACR0, PT_ORIGGPR2);
++ }
+
+- if (addr < (addr_t) &dummy->regs.acrs) {
+- /*
+- * psw and gprs are stored on the stack
+- */
+- tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
+- if (addr == (addr_t) &dummy->regs.psw.mask)
+- /* Remove per bit from user psw. */
+- tmp &= ~PSW_MASK_PER;
++ /* Finally, the ORIG_GPR2 value. */
++ if (ret == 0)
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ ®s->orig_gpr2, PT_ORIGGPR2, -1);
+
+- } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
+- /*
+- * access registers are stored in the thread structure
+- */
+- offset = addr - (addr_t) &dummy->regs.acrs;
+-#ifdef CONFIG_64BIT
+- /*
+- * Very special case: old & broken 64 bit gdb reading
+- * from acrs[15]. Result is a 64 bit value. Read the
+- * 32 bit acrs[15] value and shift it by 32. Sick...
+- */
+- if (addr == (addr_t) &dummy->regs.acrs[15])
+- tmp = ((unsigned long) child->thread.acrs[15]) << 32;
+- else
+-#endif
+- tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
++ return ret;
++}
+
+- } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+- /*
+- * orig_gpr2 is stored on the kernel stack
+- */
+- tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
++static int
++genregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ struct pt_regs *regs = task_pt_regs(target);
++ int ret = 0;
++
++ /* Check for an invalid PSW mask. */
++ if (count > 0 && pos == PT_PSWMASK) {
++ unsigned long pswmask = regs->psw.mask;
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &pswmask, PT_PSWMASK, PT_PSWADDR);
++ if (pswmask != PSW_MASK_MERGE(psw_user_bits, pswmask)
++#ifdef CONFIG_COMPAT
++ && pswmask != PSW_MASK_MERGE(psw_user32_bits, pswmask)
++#endif
++ )
++ /* Invalid psw mask. */
++ return -EINVAL;
++ regs->psw.mask = pswmask;
++ FixPerRegisters(target);
++ }
+
+- } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+- /*
+- * floating point regs. are stored in the thread structure
+- */
+- offset = addr - (addr_t) &dummy->regs.fp_regs;
+- tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
+- if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
+- tmp &= (unsigned long) FPC_VALID_MASK
+- << (BITS_PER_LONG - 32);
++ /* The rest of the PSW and the GPRs are directly on the stack. */
++ if (ret == 0) {
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ ®s->psw.addr, PT_PSWADDR,
++ PT_ACR0);
++#ifndef CONFIG_64BIT
++ /* I'd like to reject addresses without the
++ high order bit but older gdb's rely on it */
++ regs->psw.addr |= PSW_ADDR_AMODE;
++#endif
++ }
+
+- } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+- /*
+- * per_info is found in the thread structure
+- */
+- offset = addr - (addr_t) &dummy->regs.per_info;
+- tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
++ /* The ACRs are kept in the thread_struct. */
++ if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
++ if (target == current
++ && (pos != PT_ACR0 || count < sizeof(target->thread.acrs)))
++ save_access_regs(target->thread.acrs);
++
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ target->thread.acrs,
++ PT_ACR0, PT_ORIGGPR2);
++ if (ret == 0 && target == current)
++ restore_access_regs(target->thread.acrs);
++ }
+
+- } else
+- tmp = 0;
++ /* Finally, the ORIG_GPR2 value. */
++ if (ret == 0)
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ ®s->orig_gpr2, PT_ORIGGPR2, -1);
+
+- return put_user(tmp, (addr_t __user *) data);
++ return ret;
+ }
+
+-/*
+- * Write a word to the user area of a process at location addr. This
+- * operation does have an additional problem compared to peek_user.
+- * Stores to the program status word and on the floating point
+- * control register needs to get checked for validity.
+- */
+ static int
+-poke_user(struct task_struct *child, addr_t addr, addr_t data)
++fpregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
+ {
+- struct user *dummy = NULL;
+- addr_t offset, mask;
++ if (target == current)
++ save_fp_regs(&target->thread.fp_regs);
+
+- /*
+- * Stupid gdb peeks/pokes the access registers in 64 bit with
+- * an alignment of 4. Programmers from hell indeed...
+- */
+- mask = __ADDR_MASK;
+-#ifdef CONFIG_64BIT
+- if (addr >= (addr_t) &dummy->regs.acrs &&
+- addr < (addr_t) &dummy->regs.orig_gpr2)
+- mask = 3;
+-#endif
+- if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
+- return -EIO;
++ return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &target->thread.fp_regs, 0, -1);
++}
+
+- if (addr < (addr_t) &dummy->regs.acrs) {
+- /*
+- * psw and gprs are stored on the stack
+- */
+- if (addr == (addr_t) &dummy->regs.psw.mask &&
+-#ifdef CONFIG_COMPAT
+- data != PSW_MASK_MERGE(psw_user32_bits, data) &&
+-#endif
+- data != PSW_MASK_MERGE(psw_user_bits, data))
+- /* Invalid psw mask. */
++static int
++fpregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ int ret = 0;
++
++ if (target == current && (pos != 0 || count != sizeof(s390_fp_regs)))
++ save_fp_regs(&target->thread.fp_regs);
++
++ /* If setting FPC, must validate it first. */
++ if (count > 0 && pos == 0) {
++ unsigned long fpc;
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &fpc, 0, sizeof(fpc));
++ if (ret)
++ return ret;
++
++ if ((fpc & ~((unsigned long) FPC_VALID_MASK
++ << (BITS_PER_LONG - 32))) != 0)
+ return -EINVAL;
+-#ifndef CONFIG_64BIT
+- if (addr == (addr_t) &dummy->regs.psw.addr)
+- /* I'd like to reject addresses without the
+- high order bit but older gdb's rely on it */
+- data |= PSW_ADDR_AMODE;
+-#endif
+- *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
+
+- } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
+- /*
+- * access registers are stored in the thread structure
+- */
+- offset = addr - (addr_t) &dummy->regs.acrs;
+-#ifdef CONFIG_64BIT
+- /*
+- * Very special case: old & broken 64 bit gdb writing
+- * to acrs[15] with a 64 bit value. Ignore the lower
+- * half of the value and write the upper 32 bit to
+- * acrs[15]. Sick...
+- */
+- if (addr == (addr_t) &dummy->regs.acrs[15])
+- child->thread.acrs[15] = (unsigned int) (data >> 32);
+- else
+-#endif
+- *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
++ memcpy(&target->thread.fp_regs, &fpc, sizeof(fpc));
++ }
+
+- } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+- /*
+- * orig_gpr2 is stored on the kernel stack
+- */
+- task_pt_regs(child)->orig_gpr2 = data;
++ if (ret == 0)
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &target->thread.fp_regs, 0, -1);
+
+- } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+- /*
+- * floating point regs. are stored in the thread structure
+- */
+- if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
+- (data & ~((unsigned long) FPC_VALID_MASK
+- << (BITS_PER_LONG - 32))) != 0)
+- return -EINVAL;
+- offset = addr - (addr_t) &dummy->regs.fp_regs;
+- *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
++ if (ret == 0 && target == current)
++ restore_fp_regs(&target->thread.fp_regs);
+
+- } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+- /*
+- * per_info is found in the thread structure
+- */
+- offset = addr - (addr_t) &dummy->regs.per_info;
+- *(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+-
+- }
++ return ret;
++}
+
+- FixPerRegisters(child);
+- return 0;
++static int
++per_info_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &target->thread.per_info, 0, -1);
+ }
+
+ static int
+-do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
++per_info_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
+ {
+- unsigned long tmp;
+- ptrace_area parea;
+- int copied, ret;
+-
+- switch (request) {
+- case PTRACE_PEEKTEXT:
+- case PTRACE_PEEKDATA:
+- /* Remove high order bit from address (only for 31 bit). */
+- addr &= PSW_ADDR_INSN;
+- /* read word at location addr. */
+- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+- if (copied != sizeof(tmp))
+- return -EIO;
+- return put_user(tmp, (unsigned long __force __user *) data);
+-
+- case PTRACE_PEEKUSR:
+- /* read the word at location addr in the USER area. */
+- return peek_user(child, addr, data);
+-
+- case PTRACE_POKETEXT:
+- case PTRACE_POKEDATA:
+- /* Remove high order bit from address (only for 31 bit). */
+- addr &= PSW_ADDR_INSN;
+- /* write the word at location addr. */
+- copied = access_process_vm(child, addr, &data, sizeof(data),1);
+- if (copied != sizeof(data))
+- return -EIO;
+- return 0;
+-
+- case PTRACE_POKEUSR:
+- /* write the word at location addr in the USER area */
+- return poke_user(child, addr, data);
+-
+- case PTRACE_PEEKUSR_AREA:
+- case PTRACE_POKEUSR_AREA:
+- if (copy_from_user(&parea, (void __force __user *) addr,
+- sizeof(parea)))
+- return -EFAULT;
+- addr = parea.kernel_addr;
+- data = parea.process_addr;
+- copied = 0;
+- while (copied < parea.len) {
+- if (request == PTRACE_PEEKUSR_AREA)
+- ret = peek_user(child, addr, data);
+- else {
+- addr_t utmp;
+- if (get_user(utmp,
+- (addr_t __force __user *) data))
+- return -EFAULT;
+- ret = poke_user(child, addr, utmp);
+- }
+- if (ret)
+- return ret;
+- addr += sizeof(unsigned long);
+- data += sizeof(unsigned long);
+- copied += sizeof(unsigned long);
+- }
+- return 0;
+- }
+- return ptrace_request(child, request, addr, data);
++ int ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &target->thread.per_info, 0, -1);
++
++ FixPerRegisters(target);
++
++ return ret;
+ }
+
+-#ifdef CONFIG_COMPAT
+-/*
+- * Now the fun part starts... a 31 bit program running in the
+- * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
+- * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
+- * to handle, the difference to the 64 bit versions of the requests
+- * is that the access is done in multiples of 4 byte instead of
+- * 8 bytes (sizeof(unsigned long) on 31/64 bit).
+- * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
+- * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
+- * is a 31 bit program too, the content of struct user can be
+- * emulated. A 31 bit program peeking into the struct user of
+- * a 64 bit program is a no-no.
+- */
+
+ /*
+- * Same as peek_user but for a 31 bit program.
++ * These are our native regset flavors.
+ */
+-static int
+-peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+-{
+- struct user32 *dummy32 = NULL;
+- per_struct32 *dummy_per32 = NULL;
+- addr_t offset;
+- __u32 tmp;
+-
+- if (!test_thread_flag(TIF_31BIT) ||
+- (addr & 3) || addr > sizeof(struct user) - 3)
+- return -EIO;
++static const struct utrace_regset native_regsets[] = {
++ {
++ .size = sizeof(long), .align = sizeof(long),
++ .n = sizeof(s390_regs) / sizeof(long),
++ .get = genregs_get, .set = genregs_set
++ },
++ {
++ .size = sizeof(long), .align = sizeof(long),
++ .n = sizeof(s390_fp_regs) / sizeof(long),
++ .get = fpregs_get, .set = fpregs_set
++ },
++ {
++ .size = sizeof(long), .align = sizeof(long),
++ .n = sizeof(per_struct) / sizeof(long),
++ .get = per_info_get, .set = per_info_set
++ },
++};
++
++static const struct utrace_regset_view utrace_s390_native_view = {
++ .name = UTS_MACHINE, .e_machine = ELF_ARCH,
++ .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
++};
+
+- if (addr < (addr_t) &dummy32->regs.acrs) {
+- /*
+- * psw and gprs are stored on the stack
+- */
+- if (addr == (addr_t) &dummy32->regs.psw.mask) {
+- /* Fake a 31 bit psw mask. */
+- tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
+- tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
+- } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+- /* Fake a 31 bit psw address. */
+- tmp = (__u32) task_pt_regs(child)->psw.addr |
+- PSW32_ADDR_AMODE31;
+- } else {
+- /* gpr 0-15 */
+- tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
+- addr*2 + 4);
+- }
+- } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+- /*
+- * access registers are stored in the thread structure
+- */
+- offset = addr - (addr_t) &dummy32->regs.acrs;
+- tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
+
+- } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+- /*
+- * orig_gpr2 is stored on the kernel stack
+- */
+- tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
++#ifdef CONFIG_COMPAT
++static int
++s390_genregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ struct pt_regs *regs = task_pt_regs(target);
++ int ret = 0;
++
++ /* Fake a 31 bit psw mask. */
++ if (count > 0 && pos == PT_PSWMASK / 2) {
++ u32 pswmask = PSW32_MASK_MERGE(psw32_user_bits,
++ (u32) (regs->psw.mask >> 32));
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &pswmask, PT_PSWMASK / 2,
++ PT_PSWADDR / 2);
++ }
+
+- } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+- /*
+- * floating point regs. are stored in the thread structure
+- */
+- offset = addr - (addr_t) &dummy32->regs.fp_regs;
+- tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
++ /* Fake a 31 bit psw address. */
++ if (ret == 0 && count > 0 && pos == PT_PSWADDR / 2) {
++ u32 pswaddr = (u32) regs->psw.addr | PSW32_ADDR_AMODE31;
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &pswaddr, PT_PSWADDR / 2,
++ PT_GPR0 / 2);
++ }
+
+- } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+- /*
+- * per_info is found in the thread structure
+- */
+- offset = addr - (addr_t) &dummy32->regs.per_info;
+- /* This is magic. See per_struct and per_struct32. */
+- if ((offset >= (addr_t) &dummy_per32->control_regs &&
+- offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
+- (offset >= (addr_t) &dummy_per32->starting_addr &&
+- offset <= (addr_t) &dummy_per32->ending_addr) ||
+- offset == (addr_t) &dummy_per32->lowcore.words.address)
+- offset = offset*2 + 4;
++ /* The GPRs are directly on the stack. Just truncate them. */
++ while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
++ u32 value = regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)];
++ if (kbuf) {
++ *(u32 *) kbuf = value;
++ kbuf += sizeof(u32);
++ }
++ else if (put_user(value, (u32 __user *) ubuf))
++ ret = -EFAULT;
+ else
+- offset = offset*2;
+- tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
+
+- } else
+- tmp = 0;
++ /* The ACRs are kept in the thread_struct. */
++ if (ret == 0 && count > 0 && pos < PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE) {
++ if (target == current)
++ save_access_regs(target->thread.acrs);
++
++ ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ target->thread.acrs,
++ PT_ACR0 / 2,
++ PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
++ }
+
+- return put_user(tmp, (__u32 __user *) data);
++ /* Finally, the ORIG_GPR2 value. */
++ if (count > 0) {
++ if (kbuf)
++ *(u32 *) kbuf = regs->orig_gpr2;
++ else if (put_user((u32) regs->orig_gpr2,
++ (u32 __user *) ubuf))
++ return -EFAULT;
++ }
++
++ return 0;
+ }
+
+-/*
+- * Same as poke_user but for a 31 bit program.
+- */
+ static int
+-poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+-{
+- struct user32 *dummy32 = NULL;
+- per_struct32 *dummy_per32 = NULL;
+- addr_t offset;
+- __u32 tmp;
++s390_genregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ struct pt_regs *regs = task_pt_regs(target);
++ int ret = 0;
++
++ /* Check for an invalid PSW mask. */
++ if (count > 0 && pos == PT_PSWMASK / 2) {
++ u32 pswmask;
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &pswmask, PT_PSWMASK / 2,
++ PT_PSWADDR / 2);
++ if (ret)
++ return ret;
+
+- if (!test_thread_flag(TIF_31BIT) ||
+- (addr & 3) || addr > sizeof(struct user32) - 3)
+- return -EIO;
++ if (pswmask != PSW_MASK_MERGE(psw_user32_bits, pswmask))
++ /* Invalid psw mask. */
++ return -EINVAL;
+
+- tmp = (__u32) data;
++ /* Build a 64 bit psw mask from 31 bit mask. */
++ regs->psw.mask = PSW_MASK_MERGE(psw_user32_bits,
++ (u64) pswmask << 32);
++ FixPerRegisters(target);
++ }
+
+- if (addr < (addr_t) &dummy32->regs.acrs) {
+- /*
+- * psw, gprs, acrs and orig_gpr2 are stored on the stack
+- */
+- if (addr == (addr_t) &dummy32->regs.psw.mask) {
++ /* Build a 64 bit psw address from 31 bit address. */
++ if (count > 0 && pos == PT_PSWADDR / 2) {
++ u32 pswaddr;
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &pswaddr, PT_PSWADDR / 2,
++ PT_GPR0 / 2);
++ if (ret == 0)
+ /* Build a 64 bit psw mask from 31 bit mask. */
+- if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
+- /* Invalid psw mask. */
+- return -EINVAL;
+- task_pt_regs(child)->psw.mask =
+- PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
+- } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+- /* Build a 64 bit psw address from 31 bit address. */
+- task_pt_regs(child)->psw.addr =
+- (__u64) tmp & PSW32_ADDR_INSN;
+- } else {
+- /* gpr 0-15 */
+- *(__u32*)((addr_t) &task_pt_regs(child)->psw
+- + addr*2 + 4) = tmp;
++ regs->psw.addr = pswaddr & PSW32_ADDR_INSN;
++ }
++
++ /* The GPRs are directly onto the stack. */
++ while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
++ u32 value;
++
++ if (kbuf) {
++ value = *(const u32 *) kbuf;
++ kbuf += sizeof(u32);
+ }
+- } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+- /*
+- * access registers are stored in the thread structure
+- */
+- offset = addr - (addr_t) &dummy32->regs.acrs;
+- *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
++ else if (get_user(value, (const u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
+
+- } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+- /*
+- * orig_gpr2 is stored on the kernel stack
+- */
+- *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
++ regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)] = value;
++ }
+
+- } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+- /*
+- * floating point regs. are stored in the thread structure
+- */
+- if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
+- (tmp & ~FPC_VALID_MASK) != 0)
+- /* Invalid floating point control. */
+- return -EINVAL;
+- offset = addr - (addr_t) &dummy32->regs.fp_regs;
+- *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
++ /* The ACRs are kept in the thread_struct. */
++ if (count > 0 && pos < PT_ORIGGPR2 / 2) {
++ if (target == current
++ && (pos != PT_ACR0 / 2
++ || count < sizeof(target->thread.acrs)))
++ save_access_regs(target->thread.acrs);
++
++ ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ target->thread.acrs,
++ PT_ACR0 / 2,
++ PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
+
+- } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+- /*
+- * per_info is found in the thread structure.
+- */
+- offset = addr - (addr_t) &dummy32->regs.per_info;
+- /*
+- * This is magic. See per_struct and per_struct32.
+- * By incident the offsets in per_struct are exactly
+- * twice the offsets in per_struct32 for all fields.
+- * The 8 byte fields need special handling though,
+- * because the second half (bytes 4-7) is needed and
+- * not the first half.
+- */
+- if ((offset >= (addr_t) &dummy_per32->control_regs &&
+- offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
+- (offset >= (addr_t) &dummy_per32->starting_addr &&
+- offset <= (addr_t) &dummy_per32->ending_addr) ||
+- offset == (addr_t) &dummy_per32->lowcore.words.address)
+- offset = offset*2 + 4;
+- else
+- offset = offset*2;
+- *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
++ if (ret == 0 && target == current)
++ restore_access_regs(target->thread.acrs);
++ }
+
++ /* Finally, the ORIG_GPR2 value. */
++ if (ret == 0 && count > 0) {
++ u32 value;
++ if (kbuf)
++ value = *(const u32 *) kbuf;
++ else if (get_user(value, (const u32 __user *) ubuf))
++ return -EFAULT;
++ regs->orig_gpr2 = value;
+ }
+
+- FixPerRegisters(child);
+- return 0;
++ return ret;
+ }
+
+-static int
+-do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
++
++/*
++ * This is magic. See per_struct and per_struct32.
++ * By incident the offsets in per_struct are exactly
++ * twice the offsets in per_struct32 for all fields.
++ * The 8 byte fields need special handling though,
++ * because the second half (bytes 4-7) is needed and
++ * not the first half.
++ */
++static unsigned int
++offset_from_per32(unsigned int offset)
+ {
+- unsigned int tmp; /* 4 bytes !! */
+- ptrace_area_emu31 parea;
+- int copied, ret;
+-
+- switch (request) {
+- case PTRACE_PEEKTEXT:
+- case PTRACE_PEEKDATA:
+- /* read word at location addr. */
+- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+- if (copied != sizeof(tmp))
+- return -EIO;
+- return put_user(tmp, (unsigned int __force __user *) data);
+-
+- case PTRACE_PEEKUSR:
+- /* read the word at location addr in the USER area. */
+- return peek_user_emu31(child, addr, data);
+-
+- case PTRACE_POKETEXT:
+- case PTRACE_POKEDATA:
+- /* write the word at location addr. */
+- tmp = data;
+- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
+- if (copied != sizeof(tmp))
+- return -EIO;
+- return 0;
+-
+- case PTRACE_POKEUSR:
+- /* write the word at location addr in the USER area */
+- return poke_user_emu31(child, addr, data);
+-
+- case PTRACE_PEEKUSR_AREA:
+- case PTRACE_POKEUSR_AREA:
+- if (copy_from_user(&parea, (void __force __user *) addr,
+- sizeof(parea)))
+- return -EFAULT;
+- addr = parea.kernel_addr;
+- data = parea.process_addr;
+- copied = 0;
+- while (copied < parea.len) {
+- if (request == PTRACE_PEEKUSR_AREA)
+- ret = peek_user_emu31(child, addr, data);
+- else {
+- __u32 utmp;
+- if (get_user(utmp,
+- (__u32 __force __user *) data))
+- return -EFAULT;
+- ret = poke_user_emu31(child, addr, utmp);
+- }
+- if (ret)
+- return ret;
+- addr += sizeof(unsigned int);
+- data += sizeof(unsigned int);
+- copied += sizeof(unsigned int);
++ BUILD_BUG_ON(offsetof(per_struct32, control_regs) != 0);
++ if (offset - offsetof(per_struct32, control_regs) < 3*sizeof(u32)
++ || (offset >= offsetof(per_struct32, starting_addr) &&
++ offset <= offsetof(per_struct32, ending_addr))
++ || offset == offsetof(per_struct32, lowcore.words.address))
++ offset = offset*2 + 4;
++ else
++ offset = offset*2;
++ return offset;
++}
++
++static int
++s390_per_info_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ while (count > 0) {
++ u32 val = *(u32 *) ((char *) &target->thread.per_info
++ + offset_from_per32 (pos));
++ if (kbuf) {
++ *(u32 *) kbuf = val;
++ kbuf += sizeof(u32);
+ }
+- return 0;
+-#if 0 /* XXX */
+- case PTRACE_GETEVENTMSG:
+- return put_user((__u32) child->ptrace_message,
+- (unsigned int __force __user *) data);
+- case PTRACE_GETSIGINFO:
+- if (child->last_siginfo == NULL)
+- return -EINVAL;
+- return copy_siginfo_to_user32((compat_siginfo_t
+- __force __user *) data,
+- child->last_siginfo);
+- case PTRACE_SETSIGINFO:
+- if (child->last_siginfo == NULL)
+- return -EINVAL;
+- return copy_siginfo_from_user32(child->last_siginfo,
+- (compat_siginfo_t
+- __force __user *) data);
++ else if (put_user(val, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
+ }
+- return ptrace_request(child, request, addr, data);
++ return 0;
+ }
+-#endif
+-
+-#define PT32_IEEE_IP 0x13c
+
+ static int
+-do_ptrace(struct task_struct *child, long request, long addr, long data)
+-{
+- int ret;
+-
+- if (request == PTRACE_ATTACH)
+- return ptrace_attach(child);
++s390_per_info_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ while (count > 0) {
++ u32 val;
++
++ if (kbuf) {
++ val = *(const u32 *) kbuf;
++ kbuf += sizeof(u32);
++ }
++ else if (get_user(val, (const u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
+
+- /*
+- * Special cases to get/store the ieee instructions pointer.
+- */
+- if (child == current) {
+- if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
+- return peek_user(child, addr, data);
+- if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
+- return poke_user(child, addr, data);
+-#ifdef CONFIG_COMPAT
+- if (request == PTRACE_PEEKUSR &&
+- addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
+- return peek_user_emu31(child, addr, data);
+- if (request == PTRACE_POKEUSR &&
+- addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
+- return poke_user_emu31(child, addr, data);
+-#endif
++ *(u32 *) ((char *) &target->thread.per_info
++ + offset_from_per32 (pos)) = val;
+ }
++ return 0;
++}
+
+- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+- if (ret < 0)
+- return ret;
+-
+- switch (request) {
+- case PTRACE_SYSCALL:
+- /* continue and stop at next (return from) syscall */
+- case PTRACE_CONT:
+- /* restart after signal. */
+- if (!valid_signal(data))
+- return -EIO;
+- if (request == PTRACE_SYSCALL)
+- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- else
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- child->exit_code = data;
+- /* make sure the single step bit is not set. */
+- tracehook_disable_single_step(child);
+- wake_up_process(child);
+- return 0;
+-
+- case PTRACE_KILL:
+- /*
+- * make the child exit. Best I can do is send it a sigkill.
+- * perhaps it should be put in the status that it wants to
+- * exit.
+- */
+- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+- return 0;
+- child->exit_code = SIGKILL;
+- /* make sure the single step bit is not set. */
+- tracehook_disable_single_step(child);
+- wake_up_process(child);
+- return 0;
+-
+- case PTRACE_SINGLESTEP:
+- /* set the trap flag. */
+- if (!valid_signal(data))
+- return -EIO;
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- child->exit_code = data;
+- if (data)
+- set_tsk_thread_flag(child, TIF_SINGLE_STEP);
+- else
+- tracehook_enable_single_step(child);
+- /* give it a chance to run. */
+- wake_up_process(child);
+- return 0;
+-
+- case PTRACE_DETACH:
+- /* detach a process that was attached. */
+- return ptrace_detach(child, data);
+
++static const struct utrace_regset s390_compat_regsets[] = {
++ {
++ .size = sizeof(u32), .align = sizeof(u32),
++ .n = sizeof(s390_regs) / sizeof(long),
++ .get = s390_genregs_get, .set = s390_genregs_set
++ },
++ {
++ .size = sizeof(u32), .align = sizeof(u32),
++ .n = sizeof(s390_fp_regs) / sizeof(u32),
++ .get = fpregs_get, .set = fpregs_set
++ },
++ {
++ .size = sizeof(u32), .align = sizeof(u32),
++ .n = sizeof(per_struct) / sizeof(u32),
++ .get = s390_per_info_get, .set = s390_per_info_set
++ },
++};
++
++static const struct utrace_regset_view utrace_s390_compat_view = {
++ .name = "s390", .e_machine = EM_S390,
++ .regsets = s390_compat_regsets, .n = ARRAY_SIZE(s390_compat_regsets)
++};
++#endif /* CONFIG_COMPAT */
+
+- /* Do requests that differ for 31/64 bit */
+- default:
++const struct utrace_regset_view *utrace_native_view(struct task_struct *tsk)
++{
+ #ifdef CONFIG_COMPAT
+- if (test_thread_flag(TIF_31BIT))
+- return do_ptrace_emu31(child, request, addr, data);
++ if (test_tsk_thread_flag(tsk, TIF_31BIT))
++ return &utrace_s390_compat_view;
+ #endif
+- return do_ptrace_normal(child, request, addr, data);
+- }
+- /* Not reached. */
+- return -EIO;
++ return &utrace_s390_native_view;
+ }
+
+-asmlinkage long
+-sys_ptrace(long request, long pid, long addr, long data)
+-{
+- struct task_struct *child;
+- int ret;
+-
+- lock_kernel();
+- if (request == PTRACE_TRACEME) {
+- ret = ptrace_traceme();
+- goto out;
+- }
+-
+- child = ptrace_get_task_struct(pid);
+- if (IS_ERR(child)) {
+- ret = PTR_ERR(child);
+- goto out;
+- }
+-
+- ret = do_ptrace(child, request, addr, data);
+- put_task_struct(child);
+-out:
+- unlock_kernel();
+- return ret;
+-}
+
+ asmlinkage void
+ syscall_trace(struct pt_regs *regs, int entryexit)
linux-2.6-utrace-regset-sparc64.patch:
Index: linux-2.6-utrace-regset-sparc64.patch
===================================================================
RCS file: linux-2.6-utrace-regset-sparc64.patch
diff -N linux-2.6-utrace-regset-sparc64.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-regset-sparc64.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,726 @@
+[PATCH 2b] utrace: sparc64 regset support
+
+This patch converts the machine-dependent ptrace code into utrace regset
+support for sparc64.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+
+---
+
+ arch/sparc64/kernel/Makefile | 2
+ arch/sparc64/kernel/ptrace.c | 631 +++++++++++++++++++++++++++++++++++++----
+ arch/sparc64/kernel/systbls.S | 4
+ 3 files changed, 568 insertions(+), 69 deletions(-)
+
+--- linux-2.6/arch/sparc64/kernel/Makefile
++++ linux-2.6/arch/sparc64/kernel/Makefile
+@@ -5,6 +5,8 @@
+ EXTRA_AFLAGS := -ansi
+ EXTRA_CFLAGS := -Werror
+
++CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
++
+ extra-y := head.o init_task.o vmlinux.lds
+
+ obj-y := process.o setup.o cpu.o idprom.o \
+--- linux-2.6/arch/sparc64/kernel/ptrace.c
++++ linux-2.6/arch/sparc64/kernel/ptrace.c
+@@ -1,6 +1,6 @@
+-/* ptrace.c: Sparc process tracing support.
++/* ptrace.c: Sparc64 process tracing support.
+ *
+- * Copyright (C) 1996 David S. Miller (davem at caipfs.rutgers.edu)
++ * Copyright (C) 1996, 2006 David S. Miller (davem at davemloft.net)
+ * Copyright (C) 1997 Jakub Jelinek (jj at sunsite.mff.cuni.cz)
+ *
+ * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
+@@ -11,106 +11,603 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/module.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
+-#include <linux/errno.h>
+-#include <linux/ptrace.h>
+-#include <linux/user.h>
+-#include <linux/smp.h>
+-#include <linux/smp_lock.h>
+ #include <linux/security.h>
+ #include <linux/seccomp.h>
+ #include <linux/audit.h>
+-#include <linux/signal.h>
+ #include <linux/tracehook.h>
++#include <linux/elf.h>
++#include <linux/ptrace.h>
+
+ #include <asm/asi.h>
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+-#include <asm/uaccess.h>
+-#include <asm/psrcompat.h>
+-#include <asm/visasm.h>
+ #include <asm/spitfire.h>
+ #include <asm/page.h>
+ #include <asm/cpudata.h>
++#include <asm/psrcompat.h>
+
+-#if 0 /* XXX */
+-/* Returning from ptrace is a bit tricky because the syscall return
+- * low level code assumes any value returned which is negative and
+- * is a valid errno will mean setting the condition codes to indicate
+- * an error return. This doesn't work, so we have this hook.
++#define GENREG_G0 0
++#define GENREG_O0 8
++#define GENREG_L0 16
++#define GENREG_I0 24
++#define GENREG_TSTATE 32
++#define GENREG_TPC 33
++#define GENREG_TNPC 34
++#define GENREG_Y 35
++
++#define SPARC64_NGREGS 36
++
++static int genregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ struct pt_regs *regs = task_pt_regs(target);
++ int err;
++
++ err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, regs->u_regs,
++ GENREG_G0 * 8, GENREG_L0 * 8);
++
++ if (err == 0 && count > 0 && pos < (GENREG_TSTATE * 8)) {
++ struct thread_info *t = task_thread_info(target);
++ unsigned long rwindow[16], fp, *win;
++ int wsaved;
++
++ if (target == current)
++ flushw_user();
++
++ wsaved = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_WSAVED];
++ fp = regs->u_regs[UREG_FP] + STACK_BIAS;
++ if (wsaved && t->rwbuf_stkptrs[wsaved - 1] == fp)
++ win = &t->reg_window[wsaved - 1].locals[0];
++ else {
++ if (target == current) {
++ if (copy_from_user(rwindow,
++ (void __user *) fp,
++ 16 * sizeof(long)))
++ err = -EFAULT;
++ } else
++ err = access_process_vm(target, fp, rwindow,
++ 16 * sizeof(long), 0);
++ if (err)
++ return err;
++ win = rwindow;
++ }
++
++ err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ win, GENREG_L0 * 8,
++ GENREG_TSTATE * 8);
++ }
++
++ if (err == 0)
++ err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ ®s->tstate, GENREG_TSTATE * 8,
++ GENREG_Y * 8);
++ if (err == 0 && count > 0) {
++ if (kbuf)
++ *(unsigned long *) kbuf = regs->y;
++ else if (put_user(regs->y, (unsigned long __user *) ubuf))
++ return -EFAULT;
++ }
++
++ return err;
++}
++
++/* Consistent with signal handling, we only allow userspace to
++ * modify the %asi, %icc, and %xcc fields of the %tstate register.
+ */
+-static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
++#define TSTATE_DEBUGCHANGE (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)
++
++static int genregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
+ {
+- regs->u_regs[UREG_I0] = error;
+- regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);
+- regs->tpc = regs->tnpc;
+- regs->tnpc += 4;
++ struct pt_regs *regs = task_pt_regs(target);
++ unsigned long tstate_save;
++ int err;
++
++ err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->u_regs,
++ GENREG_G0 * 8, GENREG_L0 * 8);
++
++ if (err == 0 && count > 0 && pos < (GENREG_TSTATE * 8)) {
++ unsigned long fp = regs->u_regs[UREG_FP] + STACK_BIAS;
++ unsigned long rwindow[16], *winbuf;
++ unsigned int copy = (GENREG_TSTATE * 8) - pos;
++ unsigned int off;
++ int err;
++
++ if (target == current)
++ flushw_user();
++
++ if (count < copy)
++ copy = count;
++ off = pos - (GENREG_L0 * 8);
++
++ if (kbuf) {
++ winbuf = (unsigned long *) kbuf;
++ kbuf += copy;
++ }
++ else {
++ winbuf = rwindow;
++ if (copy_from_user(winbuf, ubuf, copy))
++ return -EFAULT;
++ ubuf += copy;
++ }
++ count -= copy;
++ pos += copy;
++
++ if (target == current)
++ err = copy_to_user((void __user *) fp + off,
++ winbuf, copy);
++ else
++ err = access_process_vm(target, fp + off,
++ winbuf, copy, 1);
++ }
++
++ tstate_save = regs->tstate &~ TSTATE_DEBUGCHANGE;
++
++ if (err == 0)
++ err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ ®s->tstate, GENREG_TSTATE * 8,
++ GENREG_Y * 8);
++
++ regs->tstate &= TSTATE_DEBUGCHANGE;
++ regs->tstate |= tstate_save;
++
++ if (err == 0 && count > 0) {
++ if (kbuf)
++ regs->y = *(unsigned long *) kbuf;
++ else if (get_user(regs->y, (unsigned long __user *) ubuf))
++ return -EFAULT;
++ }
++
++ return err;
++}
++
++#define FPREG_F0 0
++#define FPREG_FSR 32
++#define FPREG_GSR 33
++#define FPREG_FPRS 34
++
++#define SPARC64_NFPREGS 35
++
++static int fpregs_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ struct thread_info *t = task_thread_info(target);
++ int err;
++
++ err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ t->fpregs, FPREG_F0 * 8, FPREG_FSR * 8);
++
++ if (err == 0)
++ err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &t->xfsr[0], FPREG_FSR * 8,
++ FPREG_GSR * 8);
++
++ if (err == 0)
++ err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &t->gsr[0], FPREG_GSR * 8,
++ FPREG_FPRS * 8);
++
++ if (err == 0 && count > 0) {
++ struct pt_regs *regs = task_pt_regs(target);
++
++ if (kbuf)
++ *(unsigned long *) kbuf = regs->fprs;
++ else if (put_user(regs->fprs, (unsigned long __user *) ubuf))
++ return -EFAULT;
++ }
++
++ return err;
+ }
+
+-static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
++static int fpregs_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
+ {
+- regs->u_regs[UREG_I0] = value;
+- regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
+- regs->tpc = regs->tnpc;
+- regs->tnpc += 4;
++ struct thread_info *t = task_thread_info(target);
++ int err;
++
++ err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ t->fpregs, FPREG_F0 * 8, FPREG_FSR * 8);
++
++ if (err == 0)
++ err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &t->xfsr[0], FPREG_FSR * 8,
++ FPREG_GSR * 8);
++
++ if (err == 0)
++ err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ &t->gsr[0], FPREG_GSR * 8,
++ FPREG_FPRS * 8);
++
++ if (err == 0 && count > 0) {
++ struct pt_regs *regs = task_pt_regs(target);
++
++ if (kbuf)
++ regs->fprs = *(unsigned long *) kbuf;
++ else if (get_user(regs->fprs, (unsigned long __user *) ubuf))
++ return -EFAULT;
++ }
++
++ return err;
+ }
+
+-static inline void
+-pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
++static const struct utrace_regset native_regsets[] = {
++ {
++ .n = SPARC64_NGREGS,
++ .size = sizeof(long), .align = sizeof(long),
++ .get = genregs_get, .set = genregs_set
++ },
++ {
++ .n = SPARC64_NFPREGS,
++ .size = sizeof(long), .align = sizeof(long),
++ .get = fpregs_get, .set = fpregs_set
++ },
++};
++
++static const struct utrace_regset_view utrace_sparc64_native_view = {
++ .name = UTS_MACHINE, .e_machine = ELF_ARCH,
++ .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
++};
++
++#ifdef CONFIG_COMPAT
++
++#define GENREG32_G0 0
++#define GENREG32_O0 8
++#define GENREG32_L0 16
++#define GENREG32_I0 24
++#define GENREG32_PSR 32
++#define GENREG32_PC 33
++#define GENREG32_NPC 34
++#define GENREG32_Y 35
++#define GENREG32_WIM 36
++#define GENREG32_TBR 37
++
++#define SPARC32_NGREGS 38
++
++static int genregs32_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
+ {
+- if (test_thread_flag(TIF_32BIT)) {
+- if (put_user(value, (unsigned int __user *) addr)) {
+- pt_error_return(regs, EFAULT);
+- return;
+- }
+- } else {
+- if (put_user(value, (long __user *) addr)) {
+- pt_error_return(regs, EFAULT);
+- return;
++ struct pt_regs *regs = task_pt_regs(target);
++
++ while (count > 0 && pos < (GENREG32_L0 * 4)) {
++ u32 val = regs->u_regs[(pos - (GENREG32_G0*4))/sizeof(u32)];
++ if (kbuf) {
++ *(u32 *) kbuf = val;
++ kbuf += sizeof(u32);
++ } else if (put_user(val, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0 && pos < (GENREG32_PSR * 4)) {
++ struct thread_info *t = task_thread_info(target);
++ unsigned long fp;
++ u32 rwindow[16];
++ int wsaved;
++
++ if (target == current)
++ flushw_user();
++
++ wsaved = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_WSAVED];
++ fp = regs->u_regs[UREG_FP] & 0xffffffffUL;
++ if (wsaved && t->rwbuf_stkptrs[wsaved - 1] == fp) {
++ int i;
++ for (i = 0; i < 8; i++)
++ rwindow[i + 0] =
++ t->reg_window[wsaved-1].locals[i];
++ for (i = 0; i < 8; i++)
++ rwindow[i + 8] =
++ t->reg_window[wsaved-1].ins[i];
++ } else {
++ int err;
++
++ if (target == current) {
++ err = 0;
++ if (copy_from_user(rwindow, (void __user *) fp,
++ 16 * sizeof(u32)))
++ err = -EFAULT;
++ } else
++ err = access_process_vm(target, fp, rwindow,
++ 16 * sizeof(u32), 0);
++ if (err)
++ return err;
++ }
++
++ while (count > 0 && pos < (GENREG32_PSR * 4)) {
++ u32 val = rwindow[(pos - (GENREG32_L0*4))/sizeof(u32)];
++
++ if (kbuf) {
++ *(u32 *) kbuf = val;
++ kbuf += sizeof(u32);
++ } else if (put_user(val, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
+ }
+ }
+- regs->u_regs[UREG_I0] = 0;
+- regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
+- regs->tpc = regs->tnpc;
+- regs->tnpc += 4;
++
++ if (count > 0 && pos == (GENREG32_PSR * 4)) {
++ u32 psr = tstate_to_psr(regs->tstate);
++
++ if (kbuf) {
++ *(u32 *) kbuf = psr;
++ kbuf += sizeof(u32);
++ } else if (put_user(psr, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0 && pos == (GENREG32_PC * 4)) {
++ u32 val = regs->tpc;
++
++ if (kbuf) {
++ *(u32 *) kbuf = val;
++ kbuf += sizeof(u32);
++ } else if (put_user(val, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0 && pos == (GENREG32_NPC * 4)) {
++ u32 val = regs->tnpc;
++
++ if (kbuf) {
++ *(u32 *) kbuf = val;
++ kbuf += sizeof(u32);
++ } else if (put_user(val, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0 && pos == (GENREG32_Y * 4)) {
++ if (kbuf) {
++ *(u32 *) kbuf = regs->y;
++ kbuf += sizeof(u32);
++ } else if (put_user(regs->y, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0) {
++ if (kbuf)
++ memset(kbuf, 0, count);
++ else if (clear_user(ubuf, count))
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++static int genregs32_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ struct pt_regs *regs = task_pt_regs(target);
++
++ while (count > 0 && pos < (GENREG32_L0 * 4)) {
++ unsigned long *loc;
++ loc = ®s->u_regs[(pos - (GENREG32_G0*4))/sizeof(u32)];
++ if (kbuf) {
++ *loc = *(u32 *) kbuf;
++ kbuf += sizeof(u32);
++ } else if (get_user(*loc, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0 && pos < (GENREG32_PSR * 4)) {
++ unsigned long fp;
++ u32 regbuf[16];
++ unsigned int off, copy;
++ int err;
++
++ if (target == current)
++ flushw_user();
++
++ copy = (GENREG32_PSR * 4) - pos;
++ if (count < copy)
++ copy = count;
++ BUG_ON(copy > 16 * sizeof(u32));
++
++ fp = regs->u_regs[UREG_FP] & 0xffffffffUL;
++ off = pos - (GENREG32_L0 * 4);
++ if (kbuf) {
++ memcpy(regbuf, kbuf, copy);
++ kbuf += copy;
++ } else if (copy_from_user(regbuf, ubuf, copy))
++ return -EFAULT;
++ else
++ ubuf += copy;
++ pos += copy;
++ count -= copy;
++
++ if (target == current) {
++ err = 0;
++ if (copy_to_user((void __user *) fp + off,
++ regbuf, count))
++ err = -EFAULT;
++ } else
++ err = access_process_vm(target, fp + off,
++ regbuf, count, 1);
++ if (err)
++ return err;
++ }
++
++ if (count > 0 && pos == (GENREG32_PSR * 4)) {
++ unsigned long tstate, tstate_save;
++ u32 psr;
++
++ tstate_save = regs->tstate&~(TSTATE_ICC|TSTATE_XCC);
++
++ if (kbuf) {
++ psr = *(u32 *) kbuf;
++ kbuf += sizeof(u32);
++ } else if (get_user(psr, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++
++ tstate = psr_to_tstate_icc(psr);
++ regs->tstate = tstate_save | tstate;
++ }
++
++ if (count > 0 && pos == (GENREG32_PC * 4)) {
++ if (kbuf) {
++ regs->tpc = *(u32 *) kbuf;
++ kbuf += sizeof(u32);
++ } else if (get_user(regs->tpc, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0 && pos == (GENREG32_NPC * 4)) {
++ if (kbuf) {
++ regs->tnpc = *(u32 *) kbuf;
++ kbuf += sizeof(u32);
++ } else if (get_user(regs->tnpc, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ if (count > 0 && pos == (GENREG32_Y * 4)) {
++ if (kbuf) {
++ regs->y = *(u32 *) kbuf;
++ kbuf += sizeof(u32);
++ } else if (get_user(regs->y, (u32 __user *) ubuf))
++ return -EFAULT;
++ else
++ ubuf += sizeof(u32);
++ pos += sizeof(u32);
++ count -= sizeof(u32);
++ }
++
++ /* Ignore WIM and TBR */
++
++ return 0;
+ }
+
+-static void
+-pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
++#define FPREG32_F0 0
++#define FPREG32_FSR 32
++
++#define SPARC32_NFPREGS 33
++
++static int fpregs32_get(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
+ {
+- if (current->personality == PER_SUNOS)
+- pt_succ_return (regs, val);
+- else
+- pt_succ_return_linux (regs, val, addr);
++ struct thread_info *t = task_thread_info(target);
++ int err;
++
++ err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ t->fpregs, FPREG32_F0 * 4,
++ FPREG32_FSR * 4);
++
++ if (err == 0 && count > 0) {
++ if (kbuf) {
++ *(u32 *) kbuf = t->xfsr[0];
++ } else if (put_user(t->xfsr[0], (u32 __user *) ubuf))
++ return -EFAULT;
++ }
++
++ return err;
+ }
+-#endif
+
+-/* #define ALLOW_INIT_TRACING */
+-/* #define DEBUG_PTRACE */
++static int fpregs32_set(struct task_struct *target,
++ const struct utrace_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ struct thread_info *t = task_thread_info(target);
++ int err;
+
+-#ifdef DEBUG_PTRACE
+-char *pt_rq [] = {
+- /* 0 */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
+- /* 4 */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
+- /* 8 */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
+- /* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
+- /* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
+- /* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
+- /* 24 */ "SYSCALL", ""
++ err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
++ t->fpregs, FPREG32_F0 * 4,
++ FPREG32_FSR * 4);
++
++ if (err == 0 && count > 0) {
++ u32 fsr;
++ if (kbuf) {
++ fsr = *(u32 *) kbuf;
++ } else if (get_user(fsr, (u32 __user *) ubuf))
++ return -EFAULT;
++ t->xfsr[0] = (t->xfsr[0] & 0xffffffff00000000UL) | fsr;
++ }
++
++ return 0;
++}
++
++static const struct utrace_regset sparc32_regsets[] = {
++ {
++ .n = SPARC32_NGREGS,
++ .size = sizeof(u32), .align = sizeof(u32),
++ .get = genregs32_get, .set = genregs32_set
++ },
++ {
++ .n = SPARC32_NFPREGS,
++ .size = sizeof(u32), .align = sizeof(u32),
++ .get = fpregs32_get, .set = fpregs32_set
++ },
+ };
+-#endif
+
+-/*
+- * Called by kernel/ptrace.c when detaching..
+- *
+- * Make sure single step bits etc are not set.
+- */
+-void ptrace_disable(struct task_struct *child)
++static const struct utrace_regset_view utrace_sparc32_view = {
++ .name = "sparc", .e_machine = EM_SPARC,
++ .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
++};
++
++#endif /* CONFIG_COMPAT */
++
++const struct utrace_regset_view *utrace_native_view(struct task_struct *tsk)
+ {
+- /* nothing to do */
++#ifdef CONFIG_COMPAT
++ if (test_tsk_thread_flag(tsk, TIF_32BIT))
++ return &utrace_sparc32_view;
++#endif
++ return &utrace_sparc64_native_view;
+ }
+
++
+ /* To get the necessary page struct, access_process_vm() first calls
+ * get_user_pages(). This has done a flush_dcache_page() on the
+ * accessed page. Then our caller (copy_{to,from}_user_page()) did
+--- linux-2.6/arch/sparc64/kernel/systbls.S
++++ linux-2.6/arch/sparc64/kernel/systbls.S
+@@ -24,7 +24,7 @@ sys_call_table32:
+ /*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys32_chown16, sys32_mknod
+ /*15*/ .word sys_chmod, sys32_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
+ /*20*/ .word sys_getpid, sys_capget, sys_capset, sys32_setuid16, sys32_getuid16
+-/*25*/ .word sys32_vmsplice, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
++/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
+ /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
+ .word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
+ /*40*/ .word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
+@@ -170,7 +170,7 @@ sunos_sys_table:
+ .word sys_chmod, sys32_lchown16, sunos_brk
+ .word sunos_nosys, sys32_lseek, sunos_getpid
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+- .word sunos_getuid, sunos_nosys, sys_ptrace
++ .word sunos_getuid, sunos_nosys, compat_sys_ptrace
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sys_access, sunos_nosys, sunos_nosys
linux-2.6-utrace-regset.patch:
View full diff with command:
/usr/bin/cvs -f diff -kk -u -N -r 1.2 -r 1.3 linux-2.6-utrace-regset.patch
Index: linux-2.6-utrace-regset.patch
===================================================================
RCS file: linux-2.6-utrace-regset.patch
diff -N linux-2.6-utrace-regset.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-regset.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,4713 @@
+[PATCH 2] utrace: register sets
+
+This provides a new uniform interface in <linux/tracehook.h> for accessing
+registers and similar per-thread machine resources. The old architecture
+ptrace code for accessing register state is rolled into new functions to
+flesh out the utrace_regset interface. Nothing yet uses this interface.
+The hope is that this interface can cover most of the machine-dependent
+issues for any higher-level tracing/debugging interface.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+
+---
+
+ arch/i386/kernel/i387.c | 143 +++---
+ arch/i386/kernel/ptrace.c | 822 ++++++++++++++++++++---------------
+ arch/x86_64/ia32/ptrace32.c | 719 ++++++++++++++++++++-----------
+ arch/x86_64/ia32/fpu32.c | 92 +++-
+ arch/x86_64/kernel/ptrace.c | 730 +++++++++++++++++++------------
+ arch/powerpc/kernel/Makefile | 4
+ arch/powerpc/kernel/ptrace32.c | 443 -------------------
+ arch/powerpc/kernel/ptrace.c | 718 +++++++++++++++----------------
+ arch/powerpc/kernel/ptrace-common.h | 145 ------
+ kernel/ptrace.c | 8
+ include/linux/tracehook.h | 244 ++++++++++
+ include/asm-i386/i387.h | 13 -
+ include/asm-x86_64/fpu32.h | 3
+ include/asm-x86_64/tracehook.h | 8
+ 14 files changed, 2124 insertions(+), 1968 deletions(-)
+ delete arch/powerpc/kernel/ptrace32.c
+ delete arch/powerpc/kernel/ptrace-common.h
+
+--- linux-2.6/arch/i386/kernel/i387.c
++++ linux-2.6/arch/i386/kernel/i387.c
+@@ -222,14 +222,10 @@ void set_fpu_twd( struct task_struct *ts
+ * FXSR floating point environment conversions.
+ */
+
+-static int convert_fxsr_to_user( struct _fpstate __user *buf,
+- struct i387_fxsave_struct *fxsave )
++static inline void
++convert_fxsr_env_to_i387(unsigned long env[7],
++ struct i387_fxsave_struct *fxsave)
+ {
+- unsigned long env[7];
+- struct _fpreg __user *to;
+- struct _fpxreg *from;
+- int i;
+-
+ env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
+ env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
+ env[2] = twd_fxsr_to_i387(fxsave);
+@@ -237,7 +233,17 @@ static int convert_fxsr_to_user( struct
+ env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
+ env[5] = fxsave->foo;
+ env[6] = fxsave->fos;
++}
++
++static int convert_fxsr_to_user(struct _fpstate __user *buf,
++ struct i387_fxsave_struct *fxsave)
++{
++ unsigned long env[7];
++ struct _fpreg __user *to;
++ struct _fpxreg *from;
++ int i;
+
++ convert_fxsr_env_to_i387(env, fxsave);
+ if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
+ return 1;
+
+@@ -255,6 +261,20 @@ static int convert_fxsr_to_user( struct
+ return 0;
+ }
+
++static inline void
++convert_fxsr_env_from_i387(struct i387_fxsave_struct *fxsave,
++ const unsigned long env[7])
++{
++ fxsave->cwd = (unsigned short)(env[0] & 0xffff);
++ fxsave->swd = (unsigned short)(env[1] & 0xffff);
++ fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
++ fxsave->fip = env[3];
++ fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
++ fxsave->fcs = (env[4] & 0xffff);
++ fxsave->foo = env[5];
++ fxsave->fos = env[6];
++}
++
+ static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
+ struct _fpstate __user *buf )
+ {
+@@ -266,14 +286,7 @@ static int convert_fxsr_from_user( struc
+ if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
+ return 1;
+
+- fxsave->cwd = (unsigned short)(env[0] & 0xffff);
+- fxsave->swd = (unsigned short)(env[1] & 0xffff);
+- fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
+- fxsave->fip = env[3];
+- fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
+- fxsave->fcs = (env[4] & 0xffff);
+- fxsave->foo = env[5];
+- fxsave->fos = env[6];
++ convert_fxsr_env_from_i387(fxsave, env);
+
+ to = (struct _fpxreg *) &fxsave->st_space[0];
+ from = &buf->_st[0];
+@@ -388,88 +401,82 @@ int restore_i387( struct _fpstate __user
+ * ptrace request handlers.
+ */
+
+-static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
+- struct task_struct *tsk )
++static inline void get_fpregs_fsave(struct user_i387_struct *buf,
++ struct task_struct *tsk)
+ {
+- return __copy_to_user( buf, &tsk->thread.i387.fsave,
+- sizeof(struct user_i387_struct) );
++ memcpy(buf, &tsk->thread.i387.fsave, sizeof(struct user_i387_struct));
+ }
+
+-static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
+- struct task_struct *tsk )
++static inline void get_fpregs_fxsave(struct user_i387_struct *buf,
++ struct task_struct *tsk)
+ {
+- return convert_fxsr_to_user( (struct _fpstate __user *)buf,
+- &tsk->thread.i387.fxsave );
++ struct _fpreg *to;
++ const struct _fpxreg *from;
++ unsigned int i;
++
++ convert_fxsr_env_to_i387((unsigned long *) buf,
++ &tsk->thread.i387.fxsave);
++
++ to = (struct _fpreg *) buf->st_space;
++ from = (const struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0];
++ for (i = 0; i < 8; i++, to++, from++)
++ *to = *(const struct _fpreg *) from;
+ }
+
+-int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
++int get_fpregs(struct user_i387_struct *buf, struct task_struct *tsk)
+ {
+ if ( HAVE_HWFP ) {
+- if ( cpu_has_fxsr ) {
+- return get_fpregs_fxsave( buf, tsk );
+- } else {
+- return get_fpregs_fsave( buf, tsk );
+- }
++ if (cpu_has_fxsr)
++ get_fpregs_fxsave(buf, tsk);
++ else
++ get_fpregs_fsave(buf, tsk);
++ return 0;
+ } else {
+ return save_i387_soft( &tsk->thread.i387.soft,
+ (struct _fpstate __user *)buf );
+ }
+ }
+
+-static inline int set_fpregs_fsave( struct task_struct *tsk,
+- struct user_i387_struct __user *buf )
++static inline void set_fpregs_fsave(struct task_struct *tsk,
++ const struct user_i387_struct *buf)
+ {
+- return __copy_from_user( &tsk->thread.i387.fsave, buf,
+- sizeof(struct user_i387_struct) );
++ memcpy(&tsk->thread.i387.fsave, buf, sizeof(struct user_i387_struct));
+ }
+
+-static inline int set_fpregs_fxsave( struct task_struct *tsk,
+- struct user_i387_struct __user *buf )
++static inline void set_fpregs_fxsave(struct task_struct *tsk,
++ const struct user_i387_struct *buf)
+ {
+- return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
+- (struct _fpstate __user *)buf );
++ struct _fpxreg *to;
++ const struct _fpreg *from;
++ unsigned int i;
++
++ convert_fxsr_env_from_i387(&tsk->thread.i387.fxsave,
++ (unsigned long *) buf);
++
++ to = (struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0];
++ from = (const struct _fpreg *) buf->st_space;
++ for (i = 0; i < 8; i++, to++, from++)
++ *(struct _fpreg *) to = *from;
+ }
+
+-int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
++int set_fpregs(struct task_struct *tsk, const struct user_i387_struct *buf)
+ {
[...4320 lines suppressed...]
++ * @e_machine: ELF %EM_* value for which this is the native view, if any.
++ *
++ * A regset view is a collection of regsets (&struct utrace_regset,
++ * above). This describes all the state of a thread that can be seen
++ * from a given architecture/ABI environment. More than one view might
++ * refer to the same &struct utrace_regset, or more than one regset
++ * might refer to the same machine-specific state in the thread. For
++ * example, a 32-bit thread's state could be examined from the 32-bit
++ * view or from the 64-bit view. Either method reaches the same thread
++ * register state, doing appropriate widening or truncation.
++ */
++struct utrace_regset_view {
++ const char *name;
++ const struct utrace_regset *regsets;
++ unsigned int n;
++ u16 e_machine;
++};
++
++/*
++ * This is documented here rather than at the definition sites because its
++ * implementation is machine-dependent but its interface is universal.
++ */
++/**
++ * utrace_native_view - Return the process's native regset view.
++ * @tsk: a thread of the process in question
++ *
++ * Return the &struct utrace_regset_view that is native for the given process.
++ * For example, what it would access when it called ptrace().
++ * Throughout the life of the process, this only changes at exec.
++ */
++const struct utrace_regset_view *utrace_native_view(struct task_struct *tsk);
++
++
++/*
++ * These are helpers for writing regset get/set functions in arch code.
++ * Because @start_pos and @end_pos are always compile-time constants,
++ * these are inlined into very little code though they look large.
++ *
++ * Use one or more calls sequentially for each chunk of regset data stored
++ * contiguously in memory. Call with constants for @start_pos and @end_pos,
++ * giving the range of byte positions in the regset that data corresponds
++ * to; @end_pos can be -1 if this chunk is at the end of the regset layout.
++ * Each call updates the arguments to point past its chunk.
++ */
++
++static inline int
++utrace_regset_copyout(unsigned int *pos, unsigned int *count,
++ void **kbuf, void __user **ubuf,
++ const void *data, int start_pos, int end_pos)
++{
++ if (*count == 0)
++ return 0;
++ BUG_ON(*pos < start_pos);
++ if (end_pos < 0 || *pos < end_pos) {
++ unsigned int copy = (end_pos < 0 ? *count
++ : min(*count, end_pos - *pos));
++ data += *pos - start_pos;
++ if (*kbuf) {
++ memcpy(*kbuf, data, copy);
++ *kbuf += copy;
++ }
++ else if (copy_to_user(*ubuf, data, copy))
++ return -EFAULT;
++ else
++ *ubuf += copy;
++ *pos += copy;
++ *count -= copy;
++ }
++ return 0;
++}
++
++static inline int
++utrace_regset_copyin(unsigned int *pos, unsigned int *count,
++ const void **kbuf, const void __user **ubuf,
++ void *data, int start_pos, int end_pos)
++{
++ if (*count == 0)
++ return 0;
++ BUG_ON(*pos < start_pos);
++ if (end_pos < 0 || *pos < end_pos) {
++ unsigned int copy = (end_pos < 0 ? *count
++ : min(*count, end_pos - *pos));
++ data += *pos - start_pos;
++ if (*kbuf) {
++ memcpy(data, *kbuf, copy);
++ *kbuf += copy;
++ }
++ else if (copy_from_user(data, *ubuf, copy))
++ return -EFAULT;
++ else
++ *ubuf += copy;
++ *pos += copy;
++ *count -= copy;
++ }
++ return 0;
++}
++
++/*
++ * These two parallel the two above, but for portions of a regset layout
++ * that always read as all-zero or for which writes are ignored.
++ */
++static inline int
++utrace_regset_copyout_zero(unsigned int *pos, unsigned int *count,
++ void **kbuf, void __user **ubuf,
++ int start_pos, int end_pos)
++{
++ if (*count == 0)
++ return 0;
++ BUG_ON(*pos < start_pos);
++ if (end_pos < 0 || *pos < end_pos) {
++ unsigned int copy = (end_pos < 0 ? *count
++ : min(*count, end_pos - *pos));
++ if (*kbuf) {
++ memset(*kbuf, 0, copy);
++ *kbuf += copy;
++ }
++ else if (clear_user(*ubuf, copy))
++ return -EFAULT;
++ else
++ *ubuf += copy;
++ *pos += copy;
++ *count -= copy;
++ }
++ return 0;
++}
++
++static inline int
++utrace_regset_copyin_ignore(unsigned int *pos, unsigned int *count,
++ const void **kbuf, const void __user **ubuf,
++ int start_pos, int end_pos)
++{
++ if (*count == 0)
++ return 0;
++ BUG_ON(*pos < start_pos);
++ if (end_pos < 0 || *pos < end_pos) {
++ unsigned int copy = (end_pos < 0 ? *count
++ : min(*count, end_pos - *pos));
++ if (*kbuf)
++ *kbuf += copy;
++ else
++ *ubuf += copy;
++ *pos += copy;
++ *count -= copy;
++ }
++ return 0;
++}
++
++
+ /*
+ * Following are entry points from core code, where the user debugging
+ * support can affect the normal behavior. The locking situation is
+--- linux-2.6/include/asm-i386/i387.h
++++ linux-2.6/include/asm-i386/i387.h
+@@ -130,17 +130,12 @@ extern int save_i387( struct _fpstate __
+ extern int restore_i387( struct _fpstate __user *buf );
+
+ /*
+- * ptrace request handers...
++ * ptrace request handlers...
+ */
+-extern int get_fpregs( struct user_i387_struct __user *buf,
+- struct task_struct *tsk );
+-extern int set_fpregs( struct task_struct *tsk,
+- struct user_i387_struct __user *buf );
++extern int get_fpregs(struct user_i387_struct *, struct task_struct *);
++extern int set_fpregs(struct task_struct *, const struct user_i387_struct *);
++extern void updated_fpxregs(struct task_struct *tsk);
+
+-extern int get_fpxregs( struct user_fxsr_struct __user *buf,
+- struct task_struct *tsk );
+-extern int set_fpxregs( struct task_struct *tsk,
+- struct user_fxsr_struct __user *buf );
+
+ /*
+ * FPU state for core dumps...
+--- linux-2.6/include/asm-x86_64/fpu32.h
++++ linux-2.6/include/asm-x86_64/fpu32.h
+@@ -7,4 +7,7 @@ int restore_i387_ia32(struct task_struct
+ int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf,
+ struct pt_regs *regs, int fsave);
+
++int get_fpregs32(struct user_i387_ia32_struct *, struct task_struct *);
++int set_fpregs32(struct task_struct *, const struct user_i387_ia32_struct *);
++
+ #endif
+--- linux-2.6/include/asm-x86_64/tracehook.h
++++ linux-2.6/include/asm-x86_64/tracehook.h
+@@ -48,4 +48,12 @@ static inline void tracehook_abort_sysca
+ regs->orig_rax = -1L;
+ }
+
++/*
++ * These are used directly by some of the regset code.
++ */
++extern const struct utrace_regset_view utrace_x86_64_native;
++#ifdef CONFIG_IA32_EMULATION
++extern const struct utrace_regset_view utrace_ia32_view;
++#endif
++
+ #endif
linux-2.6-utrace-tracehook-avr32.patch:
--- NEW FILE linux-2.6-utrace-tracehook-avr32.patch ---
[PATCH 1e] utrace: tracehook for AVR32
From: Haavard Skinnemoen <hskinnemoen at atmel.com>
This patch does the initial tracehook conversion for AVR32.
Signed-off-by: Haavard Skinnemoen <hskinnemoen at atmel.com>
Signed-off-by: Roland McGrath <roland at redhat.com>
---
arch/avr32/kernel/ptrace.c | 102 ++++++++++----------------------------
arch/avr32/kernel/process.c | 2 -
arch/avr32/kernel/entry-avr32b.S | 10 +++-
include/asm-avr32/tracehook.h | 62 +++++++++++++++++++++++
4 files changed, 96 insertions(+), 80 deletions(-)
create include/asm-avr32/tracehook.h
--- linux-2.6/arch/avr32/kernel/ptrace.c
+++ linux-2.6/arch/avr32/kernel/ptrace.c
@@ -5,20 +5,16 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#undef DEBUG
+#include <linux/compile.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/unistd.h>
-#include <linux/notifier.h>
-
-#include <asm/traps.h>
-#include <asm/uaccess.h>
-#include <asm/ocd.h>
+
#include <asm/mmu_context.h>
#include <linux/kdebug.h>
@@ -28,52 +24,7 @@ static struct pt_regs *get_user_regs(str
THREAD_SIZE - sizeof(struct pt_regs));
}
-static void ptrace_single_step(struct task_struct *tsk)
-{
- pr_debug("ptrace_single_step: pid=%u, SR=0x%08lx\n",
- tsk->pid, tsk->thread.cpu_context.sr);
- if (!(tsk->thread.cpu_context.sr & SR_D)) {
- /*
- * Set a breakpoint at the current pc to force the
- * process into debug mode. The syscall/exception
- * exit code will set a breakpoint at the return
- * address when this flag is set.
- */
- pr_debug("ptrace_single_step: Setting TIF_BREAKPOINT\n");
- set_tsk_thread_flag(tsk, TIF_BREAKPOINT);
- }
-
- /* The monitor code will do the actual step for us */
- set_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
-}
-
-/*
- * Called by kernel/ptrace.c when detaching
- *
- * Make sure any single step bits, etc. are not set
- */
-void ptrace_disable(struct task_struct *child)
-{
- clear_tsk_thread_flag(child, TIF_SINGLE_STEP);
-}
-
-/*
- * Handle hitting a breakpoint
- */
-static void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
-{
- siginfo_t info;
-
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)instruction_pointer(regs);
-
- pr_debug("ptrace_break: Sending SIGTRAP to PID %u (pc = 0x%p)\n",
- tsk->pid, info.si_addr);
- force_sig_info(SIGTRAP, &info, tsk);
-}
-
+#if 0
/*
* Read the word at offset "offset" into the task's "struct user". We
* actually access the pt_regs struct stored on the kernel stack.
@@ -257,32 +208,31 @@ long arch_ptrace(struct task_struct *chi
pr_debug("sys_ptrace returning %d (DC = 0x%08lx)\n", ret, __mfdr(DBGREG_DC));
return ret;
}
+#endif
-asmlinkage void syscall_trace(void)
+asmlinkage void syscall_trace(struct pt_regs *regs, int is_exit)
{
- pr_debug("syscall_trace called\n");
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
- if (!(current->ptrace & PT_PTRACED))
- return;
- pr_debug("syscall_trace: notifying parent\n");
- /* The 0x80 provides a way for the tracing parent to
- * distinguish between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
+ tracehook_report_syscall(regs, is_exit);
+}
- /*
- * this isn't the same as continuing with a signal, but it
- * will do for normal use. strace only continues with a
- * signal if the stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- pr_debug("syscall_trace: sending signal %d to PID %u\n",
- current->exit_code, current->pid);
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+/*
+ * Handle hitting a breakpoint
+ */
+static void do_breakpoint(struct task_struct *tsk, struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_addr = (void __user *)instruction_pointer(regs);
+
+ pr_debug("ptrace_break: Sending SIGTRAP to PID %u (pc = 0x%p)\n",
+ tsk->pid, info.si_addr);
+ force_sig_info(SIGTRAP, &info, tsk);
}
asmlinkage void do_debug_priv(struct pt_regs *regs)
@@ -361,10 +311,10 @@ asmlinkage void do_debug(struct pt_regs
__mtdr(DBGREG_DC, dc);
clear_thread_flag(TIF_SINGLE_STEP);
- ptrace_break(current, regs);
+ do_breakpoint(current, regs);
}
} else {
/* regular breakpoint */
- ptrace_break(current, regs);
+ do_breakpoint(current, regs);
}
}
--- linux-2.6/arch/avr32/kernel/process.c
+++ linux-2.6/arch/avr32/kernel/process.c
@@ -382,8 +382,6 @@ asmlinkage int sys_execve(char __user *u
goto out;
error = do_execve(filename, uargv, uenvp, regs);
- if (error == 0)
- current->ptrace &= ~PT_DTRACE;
putname(filename);
out:
--- linux-2.6/arch/avr32/kernel/entry-avr32b.S
+++ linux-2.6/arch/avr32/kernel/entry-avr32b.S
@@ -223,15 +223,21 @@ ret_from_fork:
rjmp syscall_exit_cont
syscall_trace_enter:
- pushm r8-r12
+ mov r12, sp /* regs */
+ mov r11, 0 /* is_exit */
rcall syscall_trace
- popm r8-r12
+
+ /* syscall_trace may update r8, so reload r8-r12 from regs. */
+ sub lr, sp, -REG_R12
+ ldm lr, r8-r12
rjmp syscall_trace_cont
syscall_exit_work:
bld r1, TIF_SYSCALL_TRACE
brcc 1f
unmask_interrupts
+ mov r12, sp
+ mov r11, 1
rcall syscall_trace
mask_interrupts
ld.w r1, r0[TI_flags]
--- linux-2.6/include/asm-avr32/tracehook.h
+++ linux-2.6/include/asm-avr32/tracehook.h
@@ -0,0 +1,62 @@
+/*
+ * Tracing hooks for AVR32
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_AVR32_TRACEHOOK_H
+#define _ASM_AVR32_TRACEHOOK_H
+
+#include <linux/sched.h>
+
+#define ARCH_HAS_SINGLE_STEP 1
+
+static inline void tracehook_enable_single_step(struct task_struct *tsk)
+{
+ /*
+ * If the process is stopped in debug mode, simply set
+ * TIF_SINGLE_STEP to tell the monitor code to set the single
+ * step bit in DC before returning.
+ *
+ * Otherwise, we need to set a breakpoint at the return
+ * address before returning to userspace. TIF_BREAKPOINT will
+ * tell the syscall/exception exit code to do this.
+ */
+ if (!(tsk->thread.cpu_context.sr & SR_D))
+ set_tsk_thread_flag(tsk, TIF_BREAKPOINT);
+
+ set_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+}
+
+static inline void tracehook_disable_single_step(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk, TIF_BREAKPOINT);
+ clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+}
+
+static inline int tracehook_single_step_enabled(struct task_struct *tsk)
+{
+ return test_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+}
+
+static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_abort_syscall(struct pt_regs *regs)
+{
+ /* Invalid system call number => return -ENOSYS */
+ regs->r8 = -1;
+}
+
+
+#endif /* _ASM_AVR32_TRACEHOOK_H */
linux-2.6-utrace-tracehook-ia64.patch:
Index: linux-2.6-utrace-tracehook-ia64.patch
===================================================================
RCS file: linux-2.6-utrace-tracehook-ia64.patch
diff -N linux-2.6-utrace-tracehook-ia64.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-tracehook-ia64.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,248 @@
+[PATCH 1a] utrace: tracehook for ia64
+
+This patch does the initial tracehook conversion for ia64.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy at intel.com>
+Signed-off-by: Bibo mao <bibo.mao at intel.com>
+
+---
+
+ arch/ia64/ia32/ia32_entry.S | 2 +
+ arch/ia64/ia32/sys_ia32.c | 23 ++-----------
+ arch/ia64/kernel/ptrace.c | 39 ++++++----------------
+ arch/ia64/kernel/signal.c | 4 ++
+ include/asm-ia64/tracehook.h | 73 ++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 91 insertions(+), 50 deletions(-)
+ create include/asm-ia64/tracehook.h
+
+--- linux-2.6/arch/ia64/ia32/ia32_entry.S
++++ linux-2.6/arch/ia64/ia32/ia32_entry.S
+@@ -199,7 +199,7 @@ ia32_syscall_table:
+ data8 sys_setuid /* 16-bit version */
+ data8 sys_getuid /* 16-bit version */
+ data8 compat_sys_stime /* 25 */
+- data8 sys32_ptrace
++ data8 compat_sys_ptrace
+ data8 sys32_alarm
+ data8 sys_ni_syscall
+ data8 sys32_pause
+--- linux-2.6/arch/ia64/ia32/sys_ia32.c
++++ linux-2.6/arch/ia64/ia32/sys_ia32.c
+@@ -1436,25 +1436,6 @@ sys32_waitpid (int pid, unsigned int *st
+ return compat_sys_wait4(pid, stat_addr, options, NULL);
+ }
+
+-static unsigned int
+-ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
+-{
+- size_t copied;
+- unsigned int ret;
+-
+- copied = access_process_vm(child, addr, val, sizeof(*val), 0);
+- return (copied != sizeof(ret)) ? -EIO : 0;
+-}
+-
+-static unsigned int
+-ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
+-{
+-
+- if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
+- return -EIO;
+- return 0;
+-}
+-
+ /*
+ * The order in which registers are stored in the ptrace regs structure
+ */
+@@ -1752,6 +1733,7 @@ restore_ia32_fpxstate (struct task_struc
+ return 0;
+ }
+
++#if 0 /* XXX */
+ asmlinkage long
+ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
+ {
+@@ -1859,9 +1841,11 @@ sys32_ptrace (int request, pid_t pid, un
+ compat_ptr(data));
+ break;
+
++#if 0 /* XXX */
+ case PTRACE_GETEVENTMSG:
+ ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
+ break;
++#endif
+
+ case PTRACE_SYSCALL: /* continue, stop after next syscall */
+ case PTRACE_CONT: /* restart after signal. */
+@@ -1882,6 +1866,7 @@ sys32_ptrace (int request, pid_t pid, un
+ unlock_kernel();
+ return ret;
+ }
++#endif
+
+ typedef struct {
+ unsigned int ss_sp;
+--- linux-2.6/arch/ia64/kernel/ptrace.c
++++ linux-2.6/arch/ia64/kernel/ptrace.c
+@@ -12,6 +12,7 @@
+ #include <linux/mm.h>
+ #include <linux/errno.h>
+ #include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/smp_lock.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
+@@ -1598,28 +1599,6 @@ sys_ptrace (long request, pid_t pid, uns
+ return ret;
+ }
+
+-
+-static void
+-syscall_trace (void)
+-{
+- /*
+- * The 0x80 provides a way for the tracing parent to
+- * distinguish between a syscall stop and SIGTRAP delivery.
+- */
+- ptrace_notify(SIGTRAP
+- | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+-
+- /*
+- * This isn't the same as continuing with a signal, but it
+- * will do for normal use. strace only continues with a
+- * signal if the stopping signal is not SIGTRAP. -brl
+- */
+- if (current->exit_code) {
+- send_sig(current->exit_code, current, 1);
+- current->exit_code = 0;
+- }
+-}
+-
+ /* "asmlinkage" so the input arguments are preserved... */
+
+ asmlinkage void
+@@ -1627,9 +1606,8 @@ syscall_trace_enter (long arg0, long arg
+ long arg4, long arg5, long arg6, long arg7,
+ struct pt_regs regs)
+ {
+- if (test_thread_flag(TIF_SYSCALL_TRACE)
+- && (current->ptrace & PT_PTRACED))
+- syscall_trace();
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(&regs, 0);
+
+ if (unlikely(current->audit_context)) {
+ long syscall;
+@@ -1664,8 +1642,11 @@ syscall_trace_leave (long arg0, long arg
+ audit_syscall_exit(success, result);
+ }
+
+- if ((test_thread_flag(TIF_SYSCALL_TRACE)
+- || test_thread_flag(TIF_SINGLESTEP))
+- && (current->ptrace & PT_PTRACED))
+- syscall_trace();
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(&regs, 1);
++
++ if (test_thread_flag(TIF_SINGLESTEP)) {
++ force_sig(SIGTRAP, current); /* XXX */
++ tracehook_report_syscall_step(&regs);
++ }
+ }
+--- linux-2.6/arch/ia64/kernel/signal.c
++++ linux-2.6/arch/ia64/kernel/signal.c
+@@ -10,7 +10,7 @@
+ #include <linux/errno.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+-#include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/sched.h>
+ #include <linux/signal.h>
+ #include <linux/smp.h>
+@@ -429,6 +429,8 @@ handle_signal (unsigned long sig, struct
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
++
++ tracehook_report_handle_signal(sig, ka, oldset, &scr->pt);
+ return 1;
+ }
+
+--- linux-2.6/include/asm-ia64/tracehook.h
++++ linux-2.6/include/asm-ia64/tracehook.h
+@@ -0,0 +1,73 @@
++/*
++ * Copyright (C)2006 Intel Co
++ * Anil S Keshavamurthy <anil.s.keshavamurthy at intel.com>
++ * and Bibo Mao <bibo.mao at intel.com> adapted from i386.
++ *
++ * Tracing hooks, ia64 CPU support
++ */
++
++#ifndef _ASM_TRACEHOOK_H
++#define _ASM_TRACEHOOK_H 1
++
++#include <linux/sched.h>
++#include <asm/ptrace.h>
++
++/*
++ * See linux/tracehook.h for the descriptions of what these need to do.
++ */
++
++#define ARCH_HAS_SINGLE_STEP (1)
++#define ARCH_HAS_BLOCK_STEP (1)
++
++static inline void tracehook_enable_single_step(struct task_struct *tsk)
++{
++ struct pt_regs *pt = task_pt_regs(tsk);
++ ia64_psr(pt)->ss = 1;
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_disable_single_step(struct task_struct *tsk)
++{
++ struct pt_regs *pt = task_pt_regs(tsk);
++ ia64_psr(pt)->ss = 0;
++ if (ia64_psr(pt)->tb == 0)
++ clear_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_enable_block_step(struct task_struct *tsk)
++{
++ struct pt_regs *pt = task_pt_regs(tsk);
++ ia64_psr(pt)->tb = 1;
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_disable_block_step(struct task_struct *tsk)
++{
++ struct pt_regs *pt = task_pt_regs(tsk);
++ ia64_psr(pt)->tb = 0;
++ if (ia64_psr(pt)->ss == 0)
++ clear_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline int tracehook_single_step_enabled(struct task_struct *tsk)
++{
++ struct pt_regs *pt = task_pt_regs(tsk);
++ return ia64_psr(pt)->ss;
++}
++
++static inline void tracehook_abort_syscall(struct pt_regs *regs)
++{
++ regs->r15 = -1L;
++}
++
++#endif
linux-2.6-utrace-tracehook-s390.patch:
Index: linux-2.6-utrace-tracehook-s390.patch
===================================================================
RCS file: linux-2.6-utrace-tracehook-s390.patch
diff -N linux-2.6-utrace-tracehook-s390.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-tracehook-s390.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,267 @@
+[PATCH 1c] utrace: tracehook for s390
+
+This patch does the initial tracehook conversion for s390.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: David Wilder <dwilder at us.ibm.com>
+
+---
+
+ arch/s390/kernel/traps.c | 6 ++--
+ arch/s390/kernel/compat_signal.c | 5 ++-
+ arch/s390/kernel/ptrace.c | 62 +++++++++++++++++++++-----------------
+ arch/s390/kernel/signal.c | 3 ++
+ include/asm-s390/tracehook.h | 46 ++++++++++++++++++++++++++++
+ 5 files changed, 90 insertions(+), 32 deletions(-)
+ create include/asm-s390/tracehook.h
+
+--- linux-2.6/arch/s390/kernel/traps.c
++++ linux-2.6/arch/s390/kernel/traps.c
+@@ -18,7 +18,7 @@
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+-#include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/timer.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+@@ -338,7 +338,7 @@ void __kprobes do_single_step(struct pt_
+ SIGTRAP) == NOTIFY_STOP){
+ return;
+ }
+- if ((current->ptrace & PT_PTRACED) != 0)
++ if (tracehook_consider_fatal_signal(current, SIGTRAP))
+ force_sig(SIGTRAP, current);
+ }
+
+@@ -439,7 +439,7 @@ static void illegal_op(struct pt_regs *
+ if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
+ return;
+ if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
+- if (current->ptrace & PT_PTRACED)
++ if (tracehook_consider_fatal_signal(current, SIGTRAP))
+ force_sig(SIGTRAP, current);
+ else
+ signal = SIGILL;
+--- linux-2.6/arch/s390/kernel/compat_signal.c
++++ linux-2.6/arch/s390/kernel/compat_signal.c
+@@ -27,6 +27,7 @@
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+ #include <asm/lowcore.h>
++#include <linux/tracehook.h>
+ #include "compat_linux.h"
+ #include "compat_ptrace.h"
+
+@@ -580,7 +581,9 @@ handle_signal32(unsigned long sig, struc
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
++
++ tracehook_report_handle_signal(sig, ka, oldset, regs);
+ }
++
+ return ret;
+ }
+-
+--- linux-2.6/arch/s390/kernel/ptrace.c
++++ linux-2.6/arch/s390/kernel/ptrace.c
+@@ -29,6 +29,7 @@
+ #include <linux/smp_lock.h>
+ #include <linux/errno.h>
+ #include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
+ #include <linux/audit.h>
+@@ -84,18 +85,35 @@ FixPerRegisters(struct task_struct *task
+ per_info->control_regs.bits.storage_alt_space_ctl = 1;
+ else
+ per_info->control_regs.bits.storage_alt_space_ctl = 0;
++
++ if (task == current)
++ /*
++ * These registers are loaded in __switch_to on
++ * context switch. We must load them now if
++ * touching the current thread.
++ */
++ __ctl_load(per_info->control_regs.words.cr, 9, 11);
+ }
+
+-static void set_single_step(struct task_struct *task)
++void
++tracehook_enable_single_step(struct task_struct *task)
+ {
+ task->thread.per_info.single_step = 1;
+ FixPerRegisters(task);
+ }
+
+-static void clear_single_step(struct task_struct *task)
++void
++tracehook_disable_single_step(struct task_struct *task)
+ {
+ task->thread.per_info.single_step = 0;
+ FixPerRegisters(task);
++ clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
++}
++
++int
++tracehook_single_step_enabled(struct task_struct *task)
++{
++ return task->thread.per_info.single_step;
+ }
+
+ /*
+@@ -107,7 +125,7 @@ void
+ ptrace_disable(struct task_struct *child)
+ {
+ /* make sure the single step bit is not set. */
+- clear_single_step(child);
++ tracehook_disable_single_step(child);
+ }
+
+ #ifndef CONFIG_64BIT
+@@ -593,6 +611,7 @@ do_ptrace_emu31(struct task_struct *chil
+ copied += sizeof(unsigned int);
+ }
+ return 0;
++#if 0 /* XXX */
+ case PTRACE_GETEVENTMSG:
+ return put_user((__u32) child->ptrace_message,
+ (unsigned int __force __user *) data);
+@@ -658,7 +677,7 @@ do_ptrace(struct task_struct *child, lon
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ /* make sure the single step bit is not set. */
+- clear_single_step(child);
++ tracehook_disable_single_step(child);
+ wake_up_process(child);
+ return 0;
+
+@@ -672,7 +691,7 @@ do_ptrace(struct task_struct *child, lon
+ return 0;
+ child->exit_code = SIGKILL;
+ /* make sure the single step bit is not set. */
+- clear_single_step(child);
++ tracehook_disable_single_step(child);
+ wake_up_process(child);
+ return 0;
+
+@@ -685,7 +704,7 @@ do_ptrace(struct task_struct *child, lon
+ if (data)
+ set_tsk_thread_flag(child, TIF_SINGLE_STEP);
+ else
+- set_single_step(child);
++ tracehook_enable_single_step(child);
+ /* give it a chance to run. */
+ wake_up_process(child);
+ return 0;
+@@ -738,30 +757,17 @@ syscall_trace(struct pt_regs *regs, int
+ if (unlikely(current->audit_context) && entryexit)
+ audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);
+
+- if (!test_thread_flag(TIF_SYSCALL_TRACE))
+- goto out;
+- if (!(current->ptrace & PT_PTRACED))
+- goto out;
+- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+- ? 0x80 : 0));
+-
+- /*
+- * If the debuffer has set an invalid system call number,
+- * we prepare to skip the system call restart handling.
+- */
+- if (!entryexit && regs->gprs[2] >= NR_syscalls)
+- regs->trap = -1;
++ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
++ tracehook_report_syscall(regs, entryexit);
+
+- /*
+- * this isn't the same as continuing with a signal, but it will do
+- * for normal use. strace only continues with a signal if the
+- * stopping signal is not SIGTRAP. -brl
+- */
+- if (current->exit_code) {
+- send_sig(current->exit_code, current, 1);
+- current->exit_code = 0;
++ /*
++ * If the debugger has set an invalid system call number,
++ * we prepare to skip the system call restart handling.
++ */
++ if (!entryexit && regs->gprs[2] >= NR_syscalls)
++ regs->trap = -1;
+ }
+- out:
++
+ if (unlikely(current->audit_context) && !entryexit)
+ audit_syscall_entry(test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
+ regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
+--- linux-2.6/arch/s390/kernel/signal.c
++++ linux-2.6/arch/s390/kernel/signal.c
+@@ -24,6 +24,7 @@
+ #include <linux/tty.h>
+ #include <linux/personality.h>
+ #include <linux/binfmts.h>
++#include <linux/tracehook.h>
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+ #include <asm/lowcore.h>
+@@ -396,6 +397,8 @@ handle_signal(unsigned long sig, struct
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
++
++ tracehook_report_handle_signal(sig, ka, oldset, regs);
+ }
+
+ return ret;
+--- linux-2.6/include/asm-s390/tracehook.h
++++ linux-2.6/include/asm-s390/tracehook.h
+@@ -0,0 +1,46 @@
++/*
++ * Tracing hooks, s390/s390x support.
++ *
++ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ */
++
++#ifndef _ASM_TRACEHOOK_H
++#define _ASM_TRACEHOOK_H 1
++
++#include <linux/sched.h>
++#include <asm/ptrace.h>
++
++/*
++ * See linux/tracehook.h for the descriptions of what these need to do.
++ */
++
++#define ARCH_HAS_SINGLE_STEP (1)
++
++/* These three are defined in arch/s390/kernel/ptrace.c. */
++void tracehook_enable_single_step(struct task_struct *tsk);
++void tracehook_disable_single_step(struct task_struct *tsk);
++int tracehook_single_step_enabled(struct task_struct *tsk);
++
++
++static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_abort_syscall(struct pt_regs *regs)
++{
++ regs->gprs[2] = -1L;
++}
++
++#endif
linux-2.6-utrace-tracehook-sparc64.patch:
Index: linux-2.6-utrace-tracehook-sparc64.patch
===================================================================
RCS file: linux-2.6-utrace-tracehook-sparc64.patch
diff -N linux-2.6-utrace-tracehook-sparc64.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-tracehook-sparc64.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,196 @@
+[PATCH 1b] utrace: tracehook for sparc64
+
+This patch does the initial tracehook conversion for sparc64.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+
+---
+
+ arch/sparc64/kernel/ptrace.c | 29 +++++++++-------------------
+ arch/sparc64/kernel/signal.c | 2 ++
+ arch/sparc64/kernel/signal32.c | 2 ++
+ arch/sparc64/kernel/entry.S | 6 ------
+ include/asm-sparc64/tracehook.h | 40 +++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 53 insertions(+), 26 deletions(-)
+ create include/asm-sparc64/tracehook.h
+
+--- linux-2.6/arch/sparc64/kernel/ptrace.c
++++ linux-2.6/arch/sparc64/kernel/ptrace.c
+@@ -22,6 +22,7 @@
+ #include <linux/seccomp.h>
+ #include <linux/audit.h>
+ #include <linux/signal.h>
++#include <linux/tracehook.h>
+
+ #include <asm/asi.h>
+ #include <asm/pgtable.h>
+@@ -33,6 +34,7 @@
+ #include <asm/page.h>
+ #include <asm/cpudata.h>
+
++#if 0 /* XXX */
+ /* Returning from ptrace is a bit tricky because the syscall return
+ * low level code assumes any value returned which is negative and
+ * is a valid errno will mean setting the condition codes to indicate
+@@ -82,6 +84,7 @@ pt_os_succ_return (struct pt_regs *regs,
+ else
+ pt_succ_return_linux (regs, val, addr);
+ }
++#endif
+
+ /* #define ALLOW_INIT_TRACING */
+ /* #define DEBUG_PTRACE */
+@@ -167,6 +170,7 @@ void flush_ptrace_access(struct vm_area_
+ }
+ }
+
++#if 0 /* XXX */
+ asmlinkage void do_ptrace(struct pt_regs *regs)
+ {
+ int request = regs->u_regs[UREG_I0];
+@@ -643,11 +647,13 @@ out_tsk:
+ out:
+ unlock_kernel();
+ }
++#endif
+
+ asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
+ {
+ /* do the secure computing check first */
+- secure_computing(regs->u_regs[UREG_G1]);
++ if (!syscall_exit_p)
++ secure_computing(regs->u_regs[UREG_G1]);
+
+ if (unlikely(current->audit_context) && syscall_exit_p) {
+ unsigned long tstate = regs->tstate;
+@@ -659,26 +665,9 @@ asmlinkage void syscall_trace(struct pt_
+ audit_syscall_exit(result, regs->u_regs[UREG_I0]);
+ }
+
+- if (!(current->ptrace & PT_PTRACED))
+- goto out;
+-
+- if (!test_thread_flag(TIF_SYSCALL_TRACE))
+- goto out;
+-
+- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+- ? 0x80 : 0));
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, syscall_exit_p);
+
+- /*
+- * this isn't the same as continuing with a signal, but it will do
+- * for normal use. strace only continues with a signal if the
+- * stopping signal is not SIGTRAP. -brl
+- */
+- if (current->exit_code) {
+- send_sig(current->exit_code, current, 1);
+- current->exit_code = 0;
+- }
+-
+-out:
+ if (unlikely(current->audit_context) && !syscall_exit_p)
+ audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
+ AUDIT_ARCH_SPARC :
+--- linux-2.6/arch/sparc64/kernel/signal.c
++++ linux-2.6/arch/sparc64/kernel/signal.c
+@@ -22,6 +22,7 @@
+ #include <linux/tty.h>
+ #include <linux/binfmts.h>
+ #include <linux/bitops.h>
++#include <linux/tracehook.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ptrace.h>
+@@ -490,6 +491,7 @@ static inline void handle_signal(unsigne
+ sigaddset(&current->blocked,signr);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
++ tracehook_report_handle_signal(signr, ka, oldset, regs);
+ }
+
+ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
+--- linux-2.6/arch/sparc64/kernel/signal32.c
++++ linux-2.6/arch/sparc64/kernel/signal32.c
+@@ -20,6 +20,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/compat.h>
+ #include <linux/bitops.h>
++#include <linux/tracehook.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ptrace.h>
+@@ -1236,6 +1237,7 @@ static inline void handle_signal32(unsig
+ sigaddset(&current->blocked,signr);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
++ tracehook_report_handle_signal(signr, ka, oldset, regs);
+ }
+
+ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
+--- linux-2.6/arch/sparc64/kernel/entry.S
++++ linux-2.6/arch/sparc64/kernel/entry.S
+@@ -1434,7 +1434,6 @@ execve_merge:
+
+ .globl sys_pipe, sys_sigpause, sys_nis_syscall
+ .globl sys_rt_sigreturn
+- .globl sys_ptrace
+ .globl sys_sigaltstack
+ .align 32
+ sys_pipe: ba,pt %xcc, sparc_pipe
+@@ -1477,11 +1476,6 @@ sys32_rt_sigreturn:
+ add %o7, 1f-.-4, %o7
+ nop
+ #endif
+-sys_ptrace: add %sp, PTREGS_OFF, %o0
+- call do_ptrace
+- add %o7, 1f-.-4, %o7
+- nop
+- .align 32
+ 1: ldx [%curptr + TI_FLAGS], %l5
+ andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ be,pt %icc, rtrap
+--- linux-2.6/include/asm-sparc64/tracehook.h
++++ linux-2.6/include/asm-sparc64/tracehook.h
+@@ -0,0 +1,40 @@
++/*
++ * Tracing hooks, SPARC64 CPU support
++ *
++ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ */
++
++#ifndef _ASM_TRACEHOOK_H
++#define _ASM_TRACEHOOK_H 1
++
++
++#include <linux/sched.h>
++#include <asm/ptrace.h>
++
++/*
++ * See linux/tracehook.h for the descriptions of what these need to do.
++ */
++
++
++static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_abort_syscall(struct pt_regs *regs)
++{
++ regs->u_regs[UREG_G1] = -1L;
++}
++
++#endif
linux-2.6-utrace-tracehook-um.patch:
Index: linux-2.6-utrace-tracehook-um.patch
===================================================================
RCS file: linux-2.6-utrace-tracehook-um.patch
diff -N linux-2.6-utrace-tracehook-um.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-tracehook-um.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,596 @@
+[PATCH 1d] utrace: tracehook for UML
+
+From: Jeff Dike <jdike at addtoit.com>
+
+This is the tracehook part of the UML utrace work, enough to get UML
+building with the utrace prep patches applied.
+
+Checks of task->ptrace & PT_DTRACE were replaced with
+test_thread_flag(TIF_SINGLESTEP), or removed, in the case of execve.
+
+Most of arch/um/kernel/ptrace.c is gone, to be reinstated in future
+utrace work.
+
+Similarly, calls to syscall_trace and ptrace notifications in the
+signal delivery code are gone.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+Signed-off-by: Jeff Dike <jdike at linux.intel.com>
+
+---
+
+ arch/um/kernel/ptrace.c | 327 ++++-----------------------------------
+ arch/um/kernel/skas/syscall.c | 4
+ arch/um/kernel/signal.c | 5 -
+ arch/um/kernel/exec.c | 1
+ arch/um/kernel/process.c | 6 -
+ arch/um/sys-i386/signal.c | 4
+ include/asm-um/ptrace-x86_64.h | 2
+ include/asm-um/ptrace-generic.h | 3
+ include/asm-um/ptrace-i386.h | 2
+ include/asm-um/tracehook.h | 57 +++++++
+ include/asm-um/thread_info.h | 3
+ 11 files changed, 106 insertions(+), 308 deletions(-)
+ create include/asm-um/tracehook.h
+
+--- linux-2.6/arch/um/kernel/ptrace.c
++++ linux-2.6/arch/um/kernel/ptrace.c
+@@ -3,261 +3,21 @@
+ * Licensed under the GPL
+ */
+
+-#include "linux/sched.h"
+-#include "linux/mm.h"
+-#include "linux/errno.h"
+-#include "linux/smp_lock.h"
+-#include "linux/security.h"
+-#include "linux/ptrace.h"
+-#include "linux/audit.h"
+-#ifdef CONFIG_PROC_MM
+-#include "linux/proc_mm.h"
+-#endif
+-#include "asm/ptrace.h"
+-#include "asm/uaccess.h"
+-#include "kern_util.h"
+-#include "skas_ptrace.h"
+-#include "sysdep/ptrace.h"
+-#include "os.h"
+-
+-static inline void set_singlestepping(struct task_struct *child, int on)
+-{
+- if (on)
+- child->ptrace |= PT_DTRACE;
+- else
+- child->ptrace &= ~PT_DTRACE;
+- child->thread.singlestep_syscall = 0;
+-
+-#ifdef SUBARCH_SET_SINGLESTEPPING
+- SUBARCH_SET_SINGLESTEPPING(child, on);
+-#endif
+-}
++#include <linux/audit.h>
++#include <linux/elf.h>
++#include <linux/module.h>
++#include <linux/ptrace.h>
++#include <linux/tracehook.h>
+
+ /*
+ * Called by kernel/ptrace.c when detaching..
+ */
+ void ptrace_disable(struct task_struct *child)
+ {
+- set_singlestepping(child,0);
+-}
+-
+-extern int peek_user(struct task_struct * child, long addr, long data);
+-extern int poke_user(struct task_struct * child, long addr, long data);
+-
+-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+-{
+- int i, ret;
+- unsigned long __user *p = (void __user *)(unsigned long)data;
+-
+- switch (request) {
+- /* when I and D space are separate, these will need to be fixed. */
+- case PTRACE_PEEKTEXT: /* read word at location addr. */
+- case PTRACE_PEEKDATA: {
+- unsigned long tmp;
+- int copied;
+-
+- ret = -EIO;
+- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+- if (copied != sizeof(tmp))
+- break;
+- ret = put_user(tmp, p);
+- break;
+- }
+-
+- /* read the word at location addr in the USER area. */
+- case PTRACE_PEEKUSR:
+- ret = peek_user(child, addr, data);
+- break;
+-
+- /* when I and D space are separate, this will have to be fixed. */
+- case PTRACE_POKETEXT: /* write the word at location addr. */
+- case PTRACE_POKEDATA:
+- ret = -EIO;
+- if (access_process_vm(child, addr, &data, sizeof(data),
+- 1) != sizeof(data))
+- break;
+- ret = 0;
+- break;
+-
+- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+- ret = poke_user(child, addr, data);
+- break;
+-
+- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+- case PTRACE_CONT: { /* restart after signal. */
+- ret = -EIO;
+- if (!valid_signal(data))
+- break;
+-
+- set_singlestepping(child, 0);
+- if (request == PTRACE_SYSCALL) {
+- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- }
+- else {
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- }
+- child->exit_code = data;
+- wake_up_process(child);
+- ret = 0;
+- break;
+- }
+-
+-/*
+- * make the child exit. Best I can do is send it a sigkill.
+- * perhaps it should be put in the status that it wants to
+- * exit.
+- */
+- case PTRACE_KILL: {
+- ret = 0;
+- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+- break;
+-
+- set_singlestepping(child, 0);
+- child->exit_code = SIGKILL;
+- wake_up_process(child);
+- break;
+- }
+-
+- case PTRACE_SINGLESTEP: { /* set the trap flag. */
+- ret = -EIO;
+- if (!valid_signal(data))
+- break;
+- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+- set_singlestepping(child, 1);
+- child->exit_code = data;
+- /* give it a chance to run. */
+- wake_up_process(child);
+- ret = 0;
+- break;
+- }
+-
+- case PTRACE_DETACH:
+- /* detach a process that was attached. */
+- ret = ptrace_detach(child, data);
+- break;
+-
+-#ifdef PTRACE_GETREGS
+- case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+- if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
+- ret = -EIO;
+- break;
+- }
+- for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
+- __put_user(getreg(child, i), p);
+- p++;
+- }
+- ret = 0;
+- break;
+- }
+-#endif
+-#ifdef PTRACE_SETREGS
+- case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+- unsigned long tmp = 0;
+- if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
+- ret = -EIO;
+- break;
+- }
+- for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
+- __get_user(tmp, p);
+- putreg(child, i, tmp);
+- p++;
+- }
+- ret = 0;
+- break;
+- }
+-#endif
+-#ifdef PTRACE_GETFPREGS
+- case PTRACE_GETFPREGS: /* Get the child FPU state. */
+- ret = get_fpregs(data, child);
+- break;
+-#endif
+-#ifdef PTRACE_SETFPREGS
+- case PTRACE_SETFPREGS: /* Set the child FPU state. */
+- ret = set_fpregs(data, child);
+- break;
+-#endif
+-#ifdef PTRACE_GETFPXREGS
+- case PTRACE_GETFPXREGS: /* Get the child FPU state. */
+- ret = get_fpxregs(data, child);
+- break;
+-#endif
+-#ifdef PTRACE_SETFPXREGS
+- case PTRACE_SETFPXREGS: /* Set the child FPU state. */
+- ret = set_fpxregs(data, child);
+- break;
+-#endif
+- case PTRACE_GET_THREAD_AREA:
+- ret = ptrace_get_thread_area(child, addr,
+- (struct user_desc __user *) data);
+- break;
+-
+- case PTRACE_SET_THREAD_AREA:
+- ret = ptrace_set_thread_area(child, addr,
+- (struct user_desc __user *) data);
+- break;
+-
+- case PTRACE_FAULTINFO: {
+- /* Take the info from thread->arch->faultinfo,
+- * but transfer max. sizeof(struct ptrace_faultinfo).
+- * On i386, ptrace_faultinfo is smaller!
+- */
+- ret = copy_to_user(p, &child->thread.arch.faultinfo,
+- sizeof(struct ptrace_faultinfo));
+- if(ret)
+- break;
+- break;
+- }
+-
+-#ifdef PTRACE_LDT
+- case PTRACE_LDT: {
+- struct ptrace_ldt ldt;
+-
+- if(copy_from_user(&ldt, p, sizeof(ldt))){
+- ret = -EIO;
+- break;
+- }
+-
+- /* This one is confusing, so just punt and return -EIO for
+- * now
+- */
+- ret = -EIO;
+- break;
+- }
+-#endif
+-#ifdef CONFIG_PROC_MM
+- case PTRACE_SWITCH_MM: {
+- struct mm_struct *old = child->mm;
+- struct mm_struct *new = proc_mm_get_mm(data);
+-
+- if(IS_ERR(new)){
+- ret = PTR_ERR(new);
+- break;
+- }
+-
+- atomic_inc(&new->mm_users);
+- child->mm = new;
+- child->active_mm = new;
+- mmput(old);
+- ret = 0;
+- break;
+- }
+-#endif
+-#ifdef PTRACE_ARCH_PRCTL
+- case PTRACE_ARCH_PRCTL:
+- /* XXX Calls ptrace on the host - needs some SMP thinking */
+- ret = arch_prctl_skas(child, data, (void *) addr);
+- break;
+-#endif
+- default:
+- ret = ptrace_request(child, request, addr, data);
+- break;
+- }
+-
+- return ret;
+ }
+
+-void send_sigtrap(struct task_struct *tsk, union uml_pt_regs *regs,
+- int error_code)
++static void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
++ int error_code)
+ {
+ struct siginfo info;
+
+@@ -266,56 +26,39 @@ void send_sigtrap(struct task_struct *ts
+ info.si_code = TRAP_BRKPT;
+
+ /* User-mode eip? */
+- info.si_addr = UPT_IS_USER(regs) ? (void __user *) UPT_IP(regs) : NULL;
++ info.si_addr = UPT_IS_USER(&regs->regs) ?
++ (void __user *) UPT_IP(&regs->regs) : NULL;
+
+ /* Send us the fakey SIGTRAP */
+ force_sig_info(SIGTRAP, &info, tsk);
+ }
+
+-/* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
+- * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
++/* notification of system call entry/exit
++ * - triggered by current->work.syscall_trace
+ */
+-void syscall_trace(union uml_pt_regs *regs, int entryexit)
++void do_syscall_trace(struct pt_regs *regs, int entryexit)
+ {
+- int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit;
+- int tracesysgood;
+-
+- if (unlikely(current->audit_context)) {
+- if (!entryexit)
+- audit_syscall_entry(HOST_AUDIT_ARCH,
+- UPT_SYSCALL_NR(regs),
+- UPT_SYSCALL_ARG1(regs),
+- UPT_SYSCALL_ARG2(regs),
+- UPT_SYSCALL_ARG3(regs),
+- UPT_SYSCALL_ARG4(regs));
+- else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)),
+- UPT_SYSCALL_RET(regs));
+- }
+-
+- /* Fake a debug trap */
+- if (is_singlestep)
+- send_sigtrap(current, regs, 0);
+-
+- if (!test_thread_flag(TIF_SYSCALL_TRACE))
+- return;
+-
+- if (!(current->ptrace & PT_PTRACED))
+- return;
+-
+- /* the 0x80 provides a way for the tracing parent to distinguish
+- between a syscall stop and SIGTRAP delivery */
+- tracesysgood = (current->ptrace & PT_TRACESYSGOOD);
+- ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0));
+-
+- if (entryexit) /* force do_signal() --> is_syscall() */
+- set_thread_flag(TIF_SIGPENDING);
+-
+- /* this isn't the same as continuing with a signal, but it will do
+- * for normal use. strace only continues with a signal if the
+- * stopping signal is not SIGTRAP. -brl
+- */
+- if (current->exit_code) {
+- send_sig(current->exit_code, current, 1);
+- current->exit_code = 0;
+- }
++ /* do the secure computing check first */
++ if (!entryexit)
++ secure_computing(PT_REGS_SYSCALL_NR(regs));
++
++ if (unlikely(current->audit_context) && entryexit)
++ audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(&regs->regs)),
++ UPT_SYSCALL_RET(&regs->regs));
++
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, entryexit);
++
++ if (test_thread_flag(TIF_SINGLESTEP) && entryexit) {
++ send_sigtrap(current, regs, 0); /* XXX */
++ tracehook_report_syscall_step(regs);
++ }
++
++ if (unlikely(current->audit_context) && !entryexit)
++ audit_syscall_entry(HOST_AUDIT_ARCH,
++ UPT_SYSCALL_NR(&regs->regs),
++ UPT_SYSCALL_ARG1(&regs->regs),
++ UPT_SYSCALL_ARG2(&regs->regs),
++ UPT_SYSCALL_ARG3(&regs->regs),
++ UPT_SYSCALL_ARG4(&regs->regs));
+ }
+--- linux-2.6/arch/um/kernel/skas/syscall.c
++++ linux-2.6/arch/um/kernel/skas/syscall.c
+@@ -19,8 +19,6 @@ void handle_syscall(union uml_pt_regs *r
+ long result;
+ int syscall;
+
+- syscall_trace(r, 0);
+-
+ current->thread.nsyscalls++;
+ nsyscalls++;
+
+@@ -38,6 +36,4 @@ void handle_syscall(union uml_pt_regs *r
+ else result = EXECUTE_SYSCALL(syscall, regs);
+
+ REGS_SET_SYSCALL_RETURN(r->skas.regs, result);
+-
+- syscall_trace(r, 1);
+ }
+--- linux-2.6/arch/um/kernel/signal.c
++++ linux-2.6/arch/um/kernel/signal.c
+@@ -14,6 +14,7 @@
+ #include "linux/tty.h"
+ #include "linux/binfmts.h"
+ #include "linux/ptrace.h"
++#include "linux/tracehook.h"
+ #include "asm/signal.h"
+ #include "asm/uaccess.h"
+ #include "asm/unistd.h"
+@@ -92,6 +93,8 @@ static int handle_signal(struct pt_regs
+ sigaddset(&current->blocked, signr);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
++
++ tracehook_report_handle_signal(signr, ka, oldset, regs);
+ }
+
+ return err;
+@@ -147,7 +150,7 @@ static int kern_do_signal(struct pt_regs
+ * on the host. The tracing thread will check this flag and
+ * PTRACE_SYSCALL if necessary.
+ */
+- if(current->ptrace & PT_DTRACE)
++ if(test_thread_flag(TIF_SYSCALL_TRACE))
+ current->thread.singlestep_syscall =
+ is_syscall(PT_REGS_IP(&current->thread.regs));
+
+--- linux-2.6/arch/um/kernel/exec.c
++++ linux-2.6/arch/um/kernel/exec.c
+@@ -51,7 +51,6 @@ static long execve1(char *file, char __u
+ error = do_execve(file, argv, env, &current->thread.regs);
+ if (error == 0){
+ task_lock(current);
+- current->ptrace &= ~PT_DTRACE;
+ #ifdef SUBARCH_EXECVE1
+ SUBARCH_EXECVE1(&current->thread.regs.regs);
+ #endif
+--- linux-2.6/arch/um/kernel/process.c
++++ linux-2.6/arch/um/kernel/process.c
+@@ -403,11 +403,11 @@ int singlestepping(void * t)
+ {
+ struct task_struct *task = t ? t : current;
+
+- if ( ! (task->ptrace & PT_DTRACE) )
+- return(0);
++ if (!test_thread_flag(TIF_SINGLESTEP))
++ return 0;
+
+ if (task->thread.singlestep_syscall)
+- return(1);
++ return 1;
+
+ return 2;
+ }
+--- linux-2.6/arch/um/sys-i386/signal.c
++++ linux-2.6/arch/um/sys-i386/signal.c
+@@ -274,8 +274,6 @@ int setup_signal_stack_sc(unsigned long
+ PT_REGS_EDX(regs) = (unsigned long) 0;
+ PT_REGS_ECX(regs) = (unsigned long) 0;
+
+- if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+- ptrace_notify(SIGTRAP);
+ return 0;
+
+ err:
+@@ -331,8 +329,6 @@ int setup_signal_stack_si(unsigned long
+ PT_REGS_EDX(regs) = (unsigned long) &frame->info;
+ PT_REGS_ECX(regs) = (unsigned long) &frame->uc;
+
+- if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+- ptrace_notify(SIGTRAP);
+ return 0;
+
+ err:
+--- linux-2.6/include/asm-um/ptrace-x86_64.h
++++ linux-2.6/include/asm-um/ptrace-x86_64.h
+@@ -14,6 +14,8 @@
+ #define __FRAME_OFFSETS /* Needed to get the R* macros */
+ #include "asm/ptrace-generic.h"
+
++#define ARCH_HAS_SINGLE_STEP (1)
++
+ #define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64
+
+ /* Also defined in sysdep/ptrace.h, so may already be defined. */
+--- linux-2.6/include/asm-um/ptrace-generic.h
++++ linux-2.6/include/asm-um/ptrace-generic.h
+@@ -44,9 +44,6 @@ extern int set_fpxregs(unsigned long buf
+
+ extern void show_regs(struct pt_regs *regs);
+
+-extern void send_sigtrap(struct task_struct *tsk, union uml_pt_regs *regs,
+- int error_code);
+-
+ extern int arch_copy_tls(struct task_struct *new);
+ extern void clear_flushed_tls(struct task_struct *task);
+
+--- linux-2.6/include/asm-um/ptrace-i386.h
++++ linux-2.6/include/asm-um/ptrace-i386.h
+@@ -6,6 +6,8 @@
+ #ifndef __UM_PTRACE_I386_H
+ #define __UM_PTRACE_I386_H
+
++#define ARCH_HAS_SINGLE_STEP (1)
++
+ #define HOST_AUDIT_ARCH AUDIT_ARCH_I386
+
+ #include "linux/compiler.h"
+--- linux-2.6/include/asm-um/tracehook.h
++++ linux-2.6/include/asm-um/tracehook.h
+@@ -0,0 +1,57 @@
++/*
++ * Tracing hooks, i386 CPU support
++ *
++ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ *
++ * Munged for UML - jdike@{addtoit,linux.intel}.com
++ */
++
++#ifndef _ASM_TRACEHOOK_H
++#define _ASM_TRACEHOOK_H 1
++
++#include <linux/sched.h>
++#include <asm/ptrace.h>
++#include <asm/thread_info.h>
++
++/*
++ * See linux/tracehook.h for the descriptions of what these need to do.
++ */
++
++static inline void tracehook_enable_single_step(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_disable_single_step(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline int tracehook_single_step_enabled(struct task_struct *tsk)
++{
++ return test_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_abort_syscall(struct pt_regs *regs)
++{
++ PT_REGS_SYSCALL_NR(regs) = -1;
++}
++
++
++#endif
+--- linux-2.6/include/asm-um/thread_info.h
++++ linux-2.6/include/asm-um/thread_info.h
+@@ -71,6 +71,8 @@ static inline struct thread_info *curren
+ #define TIF_MEMDIE 5
+ #define TIF_SYSCALL_AUDIT 6
+ #define TIF_RESTORE_SIGMASK 7
++#define TIF_SINGLESTEP 8 /* restore singlestep on return to user
++ * mode */
+
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+@@ -79,5 +81,6 @@ static inline struct thread_info *curren
+ #define _TIF_MEMDIE (1 << TIF_MEMDIE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
++#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+
+ #endif
linux-2.6-utrace-tracehook.patch:
View full diff with command:
/usr/bin/cvs -f diff -kk -u -N -r 1.2 -r 1.3 linux-2.6-utrace-tracehook.patch
Index: linux-2.6-utrace-tracehook.patch
===================================================================
RCS file: linux-2.6-utrace-tracehook.patch
diff -N linux-2.6-utrace-tracehook.patch
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ linux-2.6-utrace-tracehook.patch 20 Jul 2007 18:48:03 -0000 1.3
@@ -0,0 +1,4155 @@
+[PATCH 1] utrace: tracehook (die, ptrace, die)
+
+This patch rips out the old ptrace implementation. It leaves behind stubs
+so ptrace just fails with ENOSYS. This is not really a useful kernel tree
+to build, but it's a clean removal of the old code before adding the new.
+Some dead code is left behind especially in architecture ptrace.c files;
+this is left in to be reused under new interfaces by later patches,
+reducing the total disruption to the code.
+
+The "real_parent" member in task_struct is renamed "parent", and the old
+"parent" member is gone. That is to say, nothing interferes in the normal
+parent/child links any more. The fact that ptrace could cause these links
+to change was the source of numerous race conditions and similar bugs
+uncovered and worked around (often contortedly) in the core task management
+code under the old ptrace implementation.
+
+In each place in core code that referred to the old ptrace stuff, we now
+have the tracehook interface. These declarations are in <linux/tracehook.h>,
+and use "tracehook_*" function names. This centralizes all the hooks
+that a thread tracing facility needs into core code, and the file
+documents the calling environment (locking conditions, etc.) and meaning
+of each hook. The definitions here are all stubs doing nothing, so
+there are no user debugging features in the kernel at all. They provide
+the placeholders for where a tracing interface can tie into the kernel.
+
+The architecture support for single-step (and block-step) from the old
+ptrace support is moved into tracehook_* function interfaces. Nothing
+yet calls these, but this provides the clean interface that user
+debugging support can use for the architecture-specific single-step
+control code.
+
+Signed-off-by: Roland McGrath <roland at redhat.com>
+
+---
+
+ arch/alpha/kernel/asm-offsets.c | 2
+ arch/alpha/kernel/entry.S | 4
+ arch/arm/kernel/ptrace.c | 17 -
+ arch/arm26/kernel/ptrace.c | 32 --
+ arch/frv/kernel/ptrace.c | 15 -
+ arch/i386/kernel/entry.S | 7
+ arch/i386/kernel/process.c | 3
+ arch/i386/kernel/ptrace.c | 104 +--------
+ arch/i386/kernel/signal.c | 37 +--
+ arch/i386/kernel/vm86.c | 7
+ arch/i386/math-emu/fpu_entry.c | 6
+ arch/ia64/kernel/asm-offsets.c | 2
+ arch/ia64/kernel/fsys.S | 16 -
+ arch/ia64/kernel/mca.c | 2
+ arch/mips/kernel/ptrace.c | 21 -
+ arch/mips/kernel/sysirix.c | 2
+ arch/powerpc/kernel/asm-offsets.c | 2
+ arch/powerpc/kernel/process.c | 5
+ arch/powerpc/kernel/ptrace-common.h | 16 -
+ arch/powerpc/kernel/ptrace.c | 76 +-----
+ arch/powerpc/kernel/ptrace32.c | 13 -
+ arch/powerpc/kernel/signal_32.c | 3
+ arch/powerpc/kernel/signal_64.c | 3
+ arch/powerpc/kernel/sys_ppc32.c | 5
+ arch/ppc/kernel/asm-offsets.c | 2
+ arch/s390/kernel/compat_linux.c | 3
+ arch/s390/kernel/process.c | 3
+ arch/sparc64/kernel/binfmt_aout32.c | 2
+ arch/sparc64/kernel/process.c | 3
+ arch/sparc64/kernel/sys_sparc32.c | 3
+ arch/x86_64/ia32/ia32_aout.c | 6
+ arch/x86_64/ia32/ia32_signal.c | 7
+ arch/x86_64/ia32/ia32entry.S | 4
+ arch/x86_64/ia32/ptrace32.c | 2
+ arch/x86_64/ia32/sys_ia32.c | 5
+ arch/x86_64/kernel/entry.S | 8
+ arch/x86_64/kernel/process.c | 5
+ arch/x86_64/kernel/ptrace.c | 57 +---
+ arch/x86_64/kernel/signal.c | 28 +-
+ arch/x86_64/kernel/traps.c | 8
+ arch/x86_64/mm/fault.c | 4
+ drivers/connector/cn_proc.c | 4
+ fs/binfmt_aout.c | 6
+ fs/binfmt_elf.c | 6
+ fs/binfmt_elf_fdpic.c | 7
+ fs/binfmt_flat.c | 3
+ fs/binfmt_som.c | 2
+ fs/exec.c | 11
+ fs/proc/array.c | 12 -
+ fs/proc/base.c | 17 -
+ include/asm-i386/signal.h | 4
+ include/asm-i386/thread_info.h | 7
+ include/asm-i386/tracehook.h | 52 ++++
+ include/asm-powerpc/tracehook.h | 74 ++++++
+ include/asm-x86_64/thread_info.h | 3
+ include/asm-x86_64/tracehook.h | 51 ++++
+ include/linux/init_task.h | 3
+ include/linux/ptrace.h | 18 -
+ include/linux/sched.h | 16 -
+ include/linux/tracehook.h | 414 ++++++++++++++++++++++++++++++++++++
+ kernel/exit.c | 252 ++++++---------------
+ kernel/fork.c | 66 +----
+ kernel/ptrace.c | 299 +-------------------------
+ kernel/signal.c | 212 +++---------------
+ kernel/sys.c | 2
+ kernel/timer.c | 6
+ kernel/tsacct.c | 2
+ mm/nommu.c | 4
+ security/selinux/hooks.c | 54 ++--
+ security/selinux/include/objsec.h | 1
+ 70 files changed, 942 insertions(+), 1216 deletions(-)
+ create include/linux/tracehook.h
+ create include/asm-i386/tracehook.h
+ create include/asm-powerpc/tracehook.h
+ create include/asm-x86_64/tracehook.h
+
+Index: b/fs/binfmt_flat.c
+===================================================================
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -914,9 +914,6 @@ static int load_flat_binary(struct linux
+
+ start_thread(regs, start_addr, current->mm->start_stack);
+
+- if (current->ptrace & PT_PTRACED)
+- send_sig(SIGTRAP, current, 0);
+-
+ return 0;
+ }
+
+Index: b/fs/binfmt_som.c
+===================================================================
+--- a/fs/binfmt_som.c
++++ b/fs/binfmt_som.c
+@@ -285,8 +285,6 @@ load_som_binary(struct linux_binprm * bp
+ map_hpux_gateway_page(current,current->mm);
+
+ start_thread_som(regs, som_entry, bprm->p);
+- if (current->ptrace & PT_PTRACED)
+- send_sig(SIGTRAP, current, 0);
+ return 0;
+
+ /* error cleanup */
+Index: b/fs/proc/base.c
+===================================================================
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -67,6 +67,7 @@
+ #include <linux/mount.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/seccomp.h>
+ #include <linux/cpuset.h>
+ #include <linux/audit.h>
+@@ -192,13 +193,6 @@ static int proc_root_link(struct inode *
+ return result;
+ }
+
+-#define MAY_PTRACE(task) \
+- (task == current || \
+- (task->parent == current && \
+- (task->ptrace & PT_PTRACED) && \
+- (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+- security_ptrace(current,task) == 0))
+-
+ static int proc_pid_environ(struct task_struct *task, char * buffer)
+ {
+ int res = 0;
+@@ -523,7 +517,8 @@ static ssize_t mem_read(struct file * fi
+ if (!task)
+ goto out_no_task;
+
+- if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
++ if (!tracehook_allow_access_process_vm(task)
++ || !ptrace_may_attach(task))
+ goto out;
+
+ ret = -ENOMEM;
+@@ -549,7 +544,8 @@ static ssize_t mem_read(struct file * fi
+
+ this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+ retval = access_process_vm(task, src, page, this_len, 0);
+- if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
++ if (!retval || !tracehook_allow_access_process_vm(task)
++ || !ptrace_may_attach(task)) {
+ if (!ret)
+ ret = -EIO;
+ break;
+@@ -593,7 +589,8 @@ static ssize_t mem_write(struct file * f
+ if (!task)
+ goto out_no_task;
+
+- if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
++ if (!tracehook_allow_access_process_vm(task)
++ || !ptrace_may_attach(task))
+ goto out;
+
[...3762 lines suppressed...]
+- current->ptrace &= ~PT_DTRACE; \
++ if (test_and_clear_thread_flag(TIF_FORCED_TF)) \
+ (regs)->eflags &= ~TF_MASK; \
+- } \
+ } while (0)
+
+ #endif /* __KERNEL__ */
+Index: b/include/asm-powerpc/tracehook.h
+===================================================================
+--- /dev/null
++++ b/include/asm-powerpc/tracehook.h
+@@ -0,0 +1,74 @@
++/*
++ * Tracing hooks, PowerPC CPU support
++ *
++ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ */
++
++#ifndef _ASM_TRACEHOOK_H
++#define _ASM_TRACEHOOK_H 1
++
++#include <linux/sched.h>
++#include <asm/ptrace.h>
++
++/*
++ * See linux/tracehook.h for the descriptions of what these need to do.
++ */
++
++#define ARCH_HAS_SINGLE_STEP (1)
++
++static inline void tracehook_enable_single_step(struct task_struct *task)
++{
++ struct pt_regs *regs = task->thread.regs;
++ if (regs != NULL) {
++#if defined(CONFIG_PPC32) && (defined(CONFIG_40x) || defined(CONFIG_BOOKE))
++ task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
++ regs->msr |= MSR_DE;
++#else
++ regs->msr |= MSR_SE;
++#endif
++ }
++ set_tsk_thread_flag(task, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_disable_single_step(struct task_struct *task)
++{
++ struct pt_regs *regs = task->thread.regs;
++ if (regs != NULL) {
++#if defined(CONFIG_PPC32) && (defined(CONFIG_40x) || defined(CONFIG_BOOKE))
++ task->thread.dbcr0 = 0;
++ regs->msr &= ~MSR_DE;
++#else
++ regs->msr &= ~MSR_SE;
++#endif
++ }
++ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
++}
++
++static inline int tracehook_single_step_enabled(struct task_struct *tsk)
++{
++ return test_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_abort_syscall(struct pt_regs *regs)
++{
++ regs->orig_gpr3 = -1L;
++}
++
++
++#endif
+Index: b/include/asm-x86_64/tracehook.h
+===================================================================
+--- /dev/null
++++ b/include/asm-x86_64/tracehook.h
+@@ -0,0 +1,51 @@
++/*
++ * Tracing hooks, x86-64 CPU support
++ *
++ * Copyright (C) 2006, 2007 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ */
++
++#ifndef _ASM_TRACEHOOK_H
++#define _ASM_TRACEHOOK_H 1
++
++#include <linux/sched.h>
++#include <asm/ptrace.h>
++
++/*
++ * See linux/tracehook.h for the descriptions of what these need to do.
++ */
++
++#define ARCH_HAS_SINGLE_STEP (1)
++
++/* These two are defined in arch/x86_64/kernel/ptrace.c. */
++void tracehook_enable_single_step(struct task_struct *tsk);
++void tracehook_disable_single_step(struct task_struct *tsk);
++
++static inline int tracehook_single_step_enabled(struct task_struct *tsk)
++{
++ return test_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++}
++
++static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
++}
++
++#define tracehook_syscall_callno(regs) (&(regs)->orig_rax)
++#define tracehook_syscall_retval(regs) (&(regs)->rax)
++static inline void tracehook_abort_syscall(struct pt_regs *regs)
++{
++ regs->orig_rax = -1L;
++}
++
++#endif
+Index: b/include/asm-x86_64/thread_info.h
+===================================================================
+--- a/include/asm-x86_64/thread_info.h
++++ b/include/asm-x86_64/thread_info.h
+@@ -115,7 +115,7 @@ static inline struct thread_info *stack_
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
+-/* 16 free */
++#define TIF_FORCED_TF 16 /* true if TF in eflags artificially */
+ #define TIF_IA32 17 /* 32bit process */
+ #define TIF_FORK 18 /* ret_from_fork */
+ #define TIF_ABI_PENDING 19
+@@ -133,6 +133,7 @@ static inline struct thread_info *stack_
+ #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1<<TIF_SECCOMP)
+ #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
++#define _TIF_FORCED_TF (1<<TIF_FORCED_TF)
+ #define _TIF_IA32 (1<<TIF_IA32)
+ #define _TIF_FORK (1<<TIF_FORK)
+ #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
+Index: b/drivers/connector/cn_proc.c
+===================================================================
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -63,8 +63,8 @@ void proc_fork_connector(struct task_str
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->what = PROC_EVENT_FORK;
+- ev->event_data.fork.parent_pid = task->real_parent->pid;
+- ev->event_data.fork.parent_tgid = task->real_parent->tgid;
++ ev->event_data.fork.parent_pid = task->parent->pid;
++ ev->event_data.fork.parent_tgid = task->parent->tgid;
+ ev->event_data.fork.child_pid = task->pid;
+ ev->event_data.fork.child_tgid = task->tgid;
+
+Index: b/mm/nommu.c
+===================================================================
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -20,7 +20,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+-#include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/blkdev.h>
+ #include <linux/backing-dev.h>
+ #include <linux/mount.h>
+@@ -682,7 +682,7 @@ static unsigned long determine_vm_flags(
+ * it's being traced - otherwise breakpoints set in it may interfere
+ * with another untraced process
+ */
+- if ((flags & MAP_PRIVATE) && (current->ptrace & PT_PTRACED))
++ if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
+ vm_flags &= ~VM_MAYSHARE;
+
+ return vm_flags;
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/kernel-2.6.spec,v
retrieving revision 1.3287
retrieving revision 1.3288
diff -u -r1.3287 -r1.3288
--- kernel-2.6.spec 17 Jul 2007 20:22:35 -0000 1.3287
+++ kernel-2.6.spec 20 Jul 2007 18:48:03 -0000 1.3288
@@ -506,7 +506,23 @@
%if !%{nopatches}
-Patch10: linux-2.6-utrace.patch
+Patch10: linux-2.6-utrace-tracehook.patch
+Patch11: linux-2.6-utrace-tracehook-ia64.patch
+Patch12: linux-2.6-utrace-tracehook-sparc64.patch
+Patch13: linux-2.6-utrace-tracehook-s390.patch
+Patch14: linux-2.6-utrace-tracehook-um.patch
+Patch15: linux-2.6-utrace-tracehook-avr32.patch
+Patch16: linux-2.6-utrace-regset.patch
+Patch17: linux-2.6-utrace-regset-ia64.patch
+Patch18: linux-2.6-utrace-regset-sparc64.patch
+Patch19: linux-2.6-utrace-regset-s390.patch
+Patch20: linux-2.6-utrace-regset-avr32.patch
+Patch21: linux-2.6-utrace-core.patch
+Patch22: linux-2.6-utrace-ptrace-compat.patch
+Patch23: linux-2.6-utrace-ptrace-compat-ia64.patch
+Patch24: linux-2.6-utrace-ptrace-compat-sparc64.patch
+Patch25: linux-2.6-utrace-ptrace-compat-s390.patch
+Patch26: linux-2.6-utrace-ptrace-compat-avr32.patch
#Patch20: nouveau-drm.patch
Patch30: linux-2.6-sysrq-c.patch
Patch40: linux-2.6-x86-tune-generic.patch
@@ -1061,7 +1077,24 @@
ApplyPatch linux-2.6-sched-cfs.patch
# Roland's utrace ptrace replacement.
-ApplyPatch linux-2.6-utrace.patch -F2
+ApplyPatch linux-2.6-utrace-tracehook.patch -F2
+ApplyPatch linux-2.6-utrace-tracehook-ia64.patch
+ApplyPatch linux-2.6-utrace-tracehook-sparc64.patch
+ApplyPatch linux-2.6-utrace-tracehook-s390.patch
+ApplyPatch linux-2.6-utrace-tracehook-um.patch
+ApplyPatch linux-2.6-utrace-tracehook-avr32.patch
+ApplyPatch linux-2.6-utrace-regset.patch
+ApplyPatch linux-2.6-utrace-regset-ia64.patch
+ApplyPatch linux-2.6-utrace-regset-sparc64.patch
+ApplyPatch linux-2.6-utrace-regset-s390.patch
+ApplyPatch linux-2.6-utrace-regset-avr32.patch
+ApplyPatch linux-2.6-utrace-core.patch
+ApplyPatch linux-2.6-utrace-ptrace-compat.patch
+ApplyPatch linux-2.6-utrace-ptrace-compat-ia64.patch
+ApplyPatch linux-2.6-utrace-ptrace-compat-sparc64.patch
+ApplyPatch linux-2.6-utrace-ptrace-compat-s390.patch
+ApplyPatch linux-2.6-utrace-ptrace-compat-avr32.patch
+
# setuid /proc/self/maps fix. (dependent on utrace)
ApplyPatch linux-2.6-proc-self-maps-fix.patch
@@ -2228,6 +2261,10 @@
%endif
%changelog
+* Fri Jul 20 2007 Chuck Ebbert <cebbert at redhat.com>
+- utrace update
+- CFS scheduler update
+
* Tue Jul 17 2007 John W. Linville <linville at redhat.com>
- update wireless bits
linux-2.6-sched-cfs.patch:
Index: linux-2.6-sched-cfs.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/F-7/linux-2.6-sched-cfs.patch,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- linux-2.6-sched-cfs.patch 17 Jul 2007 19:01:14 -0000 1.6
+++ linux-2.6-sched-cfs.patch 20 Jul 2007 18:48:03 -0000 1.7
@@ -16,12 +16,20 @@
4bd77321a833077c5c9ac7b9d284e261e4a8906e
sched: fix show_task()/show_tasks() output
-[not in mainline yet]
+9439aab8dbc33c2c03c3a19dba267360383ba38c
sched: fix newly idle load balance in case of SMT
-[not in mainline yet]
+969bb4e4032dac67287951d8f6642a3b5119694e
sched: fix the all pinned logic in load_balance_newidle()
+[remove the original sys_time() speedup]
+
+4e44f3497d41db4c3b9051c61410dee8ae4fb49c
+sys_time() speedup
+
+[not merged yet]
+time: introduce xtime_seconds
+
Index: linux/Documentation/kernel-parameters.txt
===================================================================
--- linux.orig/Documentation/kernel-parameters.txt
@@ -7664,10 +7672,58 @@
.ctl_name = KERN_PANIC,
.procname = "panic",
.data = &panic_timeout,
-Index: linux/kernel/time.c
-===================================================================
---- linux.orig/kernel/time.c
-+++ linux/kernel/time.c
+Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=4e44f3497d41db4c3b9051c61410dee8ae4fb49c
+Commit: 4e44f3497d41db4c3b9051c61410dee8ae4fb49c
+Parent: f482394ccbca7234d29cc146d4a2b94f976ce5a1
+Author: Ingo Molnar <mingo at elte.hu>
+AuthorDate: Sun Jul 15 23:41:18 2007 -0700
+Committer: Linus Torvalds <torvalds at woody.linux-foundation.org>
+CommitDate: Mon Jul 16 09:05:48 2007 -0700
+
+ sys_time() speedup
+
+ Improve performance of sys_time(). sys_time() returns time in seconds, but
+ it does so by calling do_gettimeofday() and then returning the tv_sec
+ portion of the GTOD time. But the data structure "xtime", which is updated
+ by every timer/scheduler tick, already offers HZ granularity time.
+
+ The patch improves the sysbench OLTP macrobenchmark significantly:
+
+ 2.6.22-rc6:
+
+ #threads
+ 1: transactions: 3733 (373.21 per sec.)
+ 2: transactions: 6676 (667.46 per sec.)
+ 3: transactions: 6957 (695.50 per sec.)
+ 4: transactions: 7055 (705.48 per sec.)
+ 5: transactions: 6596 (659.33 per sec.)
+
+ 2.6.22-rc6 + sys_time.patch:
+
+ 1: transactions: 4005 (400.47 per sec.)
+ 2: transactions: 7379 (737.77 per sec.)
+ 3: transactions: 7347 (734.49 per sec.)
+ 4: transactions: 7468 (746.65 per sec.)
+ 5: transactions: 7428 (742.47 per sec.)
+
+ Mixed API uses of gettimeofday() and time() are guaranteed to be coherent
+ via the use of a at-most-once-per-second slowpath that updates xtime.
+
+ [akpm at linux-foundation.org: build fixes]
+ Signed-off-by: Ingo Molnar <mingo at elte.hu>
+ Cc: John Stultz <johnstul at us.ibm.com>
+ Cc: Thomas Gleixner <tglx at linutronix.de>
+ Cc: Roman Zippel <zippel at linux-m68k.org>
+ Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ kernel/time.c | 32 ++++++++++++++++++++++++--------
+ 1 files changed, 24 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/time.c b/kernel/time.c
+index f04791f..ffe1914 100644
+--- a/kernel/time.c
++++ b/kernel/time.c
@@ -57,14 +57,17 @@ EXPORT_SYMBOL(sys_tz);
*/
asmlinkage long sys_time(time_t __user * tloc)
@@ -7691,11 +7747,12 @@
i = -EFAULT;
}
return i;
-@@ -373,6 +376,20 @@ void do_gettimeofday (struct timeval *tv
+@@ -373,12 +376,25 @@ void do_gettimeofday (struct timeval *tv)
tv->tv_sec = sec;
tv->tv_usec = usec;
-+
+-}
+
+ /*
+ * Make sure xtime.tv_sec [returned by sys_time()] always
+ * follows the gettimeofday() result precisely. This
@@ -7707,41 +7764,26 @@
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ update_wall_time();
-+ write_seqlock_irqrestore(&xtime_lock, flags);
++ write_sequnlock_irqrestore(&xtime_lock, flags);
+ }
- }
-
++}
EXPORT_SYMBOL(do_gettimeofday);
-Index: linux/lib/Kconfig.debug
-===================================================================
---- linux.orig/lib/Kconfig.debug
-+++ linux/lib/Kconfig.debug
-@@ -105,6 +105,15 @@ config DETECT_SOFTLOCKUP
- can be detected via the NMI-watchdog, on platforms that
- support it.)
-
-+config SCHED_DEBUG
-+ bool "Collect scheduler debugging info"
-+ depends on DEBUG_KERNEL && PROC_FS
-+ default y
-+ help
-+ If you say Y here, the /proc/sched_debug file will be provided
-+ that can help debug the scheduler. The runtime overhead of this
-+ option is minimal.
-+
- config SCHEDSTATS
- bool "Collect scheduler statistics"
- depends on DEBUG_KERNEL && PROC_FS
---- linux-2.6.22.noarch/kernel/time.c~ 2007-07-10 13:43:47.000000000 -0400
-+++ linux-2.6.22.noarch/kernel/time.c 2007-07-10 13:43:53.000000000 -0400
-@@ -35,6 +35,7 @@
- #include <linux/security.h>
- #include <linux/fs.h>
- #include <linux/module.h>
-+#include <linux/seqlock.h>
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
++#else /* CONFIG_TIME_INTERPOLATION */
+
+-#else
+ #ifndef CONFIG_GENERIC_TIME
+ /*
+ * Simulate gettimeofday using do_gettimeofday which only allows a timeval
+@@ -394,7 +410,7 @@ void getnstimeofday(struct timespec *tv)
+ }
+ EXPORT_SYMBOL_GPL(getnstimeofday);
+ #endif
+-#endif
++#endif /* CONFIG_TIME_INTERPOLATION */
+
+ /* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
Gitweb: http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=4bd77321a833077c5c9ac7b9d284e261e4a8906e
Commit: 4bd77321a833077c5c9ac7b9d284e261e4a8906e
Parent: 45f384a64f0769bb9a3caf0516de88a629f48e61
@@ -7910,3 +7952,220 @@
cpu_clear(cpu_of(busiest), cpus);
if (!cpus_empty(cpus))
goto redo;
+Subject: time: introduce xtime_seconds
+From: Ingo Molnar <mingo at elte.hu>
+
+introduce the xtime_seconds optimization. This is a read-mostly
+low-resolution time source available to sys_time() and kernel-internal
+use. This variable is kept up to date atomically, and it's monotonically
+increased, every time some time interface constructs an xtime-alike time
+result that overflows the seconds value. (it's updated from the timer
+interrupt as well)
+
+this way high-resolution time results update their seconds component at
+the same time sys_time() does it:
+
+ 1184858832999989000
+ 1184858832000000000
+ 1184858832999992000
+ 1184858832000000000
+ 1184858832999996000
+ 1184858832000000000
+ 1184858832999999000
+ 1184858832000000000
+ 1184858833000003000
+ 1184858833000000000
+ 1184858833000006000
+ 1184858833000000000
+ 1184858833000009000
+ 1184858833000000000
+
+ [ these are nsec time results from alternating calls to sys_time() and
+ sys_gettimeofday(), recorded at the seconds boundary. ]
+
+instead of the previous (non-coherent) behavior:
+
+ 1184848950999987000
+ 1184848950000000000
+ 1184848950999990000
+ 1184848950000000000
+ 1184848950999994000
+ 1184848950000000000
+ 1184848950999997000
+ 1184848950000000000
+ 1184848951000001000
+ 1184848950000000000
+ 1184848951000005000
+ 1184848950000000000
+ 1184848951000008000
+ 1184848950000000000
+ 1184848951000011000
+ 1184848950000000000
+ 1184848951000015000
+
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+---
+ include/linux/time.h | 13 +++++++++++--
+ kernel/time.c | 25 ++++++-------------------
+ kernel/time/timekeeping.c | 28 ++++++++++++++++++++++++----
+ 3 files changed, 41 insertions(+), 25 deletions(-)
+
+Index: linux/include/linux/time.h
+===================================================================
+--- linux.orig/include/linux/time.h
++++ linux/include/linux/time.h
+@@ -91,19 +91,28 @@ static inline struct timespec timespec_s
+ extern struct timespec xtime;
+ extern struct timespec wall_to_monotonic;
+ extern seqlock_t xtime_lock __attribute__((weak));
++extern unsigned long xtime_seconds;
+
+ extern unsigned long read_persistent_clock(void);
+ void timekeeping_init(void);
+
++extern void __update_xtime_seconds(unsigned long new_xtime_seconds);
++
++static inline void update_xtime_seconds(unsigned long new_xtime_seconds)
++{
++ if (unlikely((long)(new_xtime_seconds - xtime_seconds) > 0))
++ __update_xtime_seconds(new_xtime_seconds);
++}
++
+ static inline unsigned long get_seconds(void)
+ {
+- return xtime.tv_sec;
++ return xtime_seconds;
+ }
+
+ struct timespec current_kernel_time(void);
+
+ #define CURRENT_TIME (current_kernel_time())
+-#define CURRENT_TIME_SEC ((struct timespec) { xtime.tv_sec, 0 })
++#define CURRENT_TIME_SEC ((struct timespec) { xtime_seconds, 0 })
+
+ extern void do_gettimeofday(struct timeval *tv);
+ extern int do_settimeofday(struct timespec *tv);
+Index: linux/kernel/time.c
+===================================================================
+--- linux.orig/kernel/time.c
++++ linux/kernel/time.c
+@@ -58,11 +58,10 @@ EXPORT_SYMBOL(sys_tz);
+ asmlinkage long sys_time(time_t __user * tloc)
+ {
+ /*
+- * We read xtime.tv_sec atomically - it's updated
+- * atomically by update_wall_time(), so no need to
+- * even read-lock the xtime seqlock:
++ * We read xtime_seconds atomically - it's updated
++ * atomically by update_xtime_seconds():
+ */
+- time_t i = xtime.tv_sec;
++ time_t i = xtime_seconds;
+
+ smp_rmb(); /* sys_time() results are coherent */
+
+@@ -226,11 +225,11 @@ inline struct timespec current_kernel_ti
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+-
++
+ now = xtime;
+ } while (read_seqretry(&xtime_lock, seq));
+
+- return now;
++ return now;
+ }
+
+ EXPORT_SYMBOL(current_kernel_time);
+@@ -377,19 +376,7 @@ void do_gettimeofday (struct timeval *tv
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+
+- /*
+- * Make sure xtime.tv_sec [returned by sys_time()] always
+- * follows the gettimeofday() result precisely. This
+- * condition is extremely unlikely, it can hit at most
+- * once per second:
+- */
+- if (unlikely(xtime.tv_sec != tv->tv_sec)) {
+- unsigned long flags;
+-
+- write_seqlock_irqsave(&xtime_lock, flags);
+- update_wall_time();
+- write_sequnlock_irqrestore(&xtime_lock, flags);
+- }
++ update_xtime_seconds(sec);
+ }
+ EXPORT_SYMBOL(do_gettimeofday);
+
+Index: linux/kernel/time/timekeeping.c
+===================================================================
+--- linux.orig/kernel/time/timekeeping.c
++++ linux/kernel/time/timekeeping.c
+@@ -45,13 +45,27 @@ EXPORT_SYMBOL(xtime_lock);
+ * the usual normalization.
+ */
+ struct timespec xtime __attribute__ ((aligned (16)));
+-struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+-
+ EXPORT_SYMBOL(xtime);
+
++struct timespec wall_to_monotonic __attribute__ ((aligned (16))) __read_mostly;
++
++unsigned long xtime_seconds __read_mostly;
++EXPORT_SYMBOL(xtime_seconds);
++
++/* pointer to current clocksource: */
++static struct clocksource *clock __read_mostly;
+
+-static struct clocksource *clock; /* pointer to current clocksource */
++/*
++ * Called when either xtime or any xtime-alike result back to
++ * user-space overflows the xtime_seconds field:
++ */
++void __update_xtime_seconds(unsigned long new_xtime_seconds)
++{
++ unsigned long old_xtime_seconds = xtime_seconds;
+
++ if ((long)(new_xtime_seconds - old_xtime_seconds) > 0)
++ cmpxchg(&xtime_seconds, old_xtime_seconds, new_xtime_seconds);
++}
+
+ #ifdef CONFIG_GENERIC_TIME
+ /**
+@@ -100,6 +113,8 @@ static inline void __get_realtime_clock_
+ } while (read_seqretry(&xtime_lock, seq));
+
+ timespec_add_ns(ts, nsecs);
++
++ update_xtime_seconds(ts->tv_sec);
+ }
+
+ /**
+@@ -256,6 +271,8 @@ void __init timekeeping_init(void)
+ clock->cycle_last = clocksource_read(clock);
+
+ xtime.tv_sec = sec;
++ update_xtime_seconds(sec);
++
+ xtime.tv_nsec = 0;
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+@@ -290,6 +307,8 @@ static int timekeeping_resume(struct sys
+ unsigned long sleep_length = now - timekeeping_suspend_time;
+
+ xtime.tv_sec += sleep_length;
++ update_xtime_seconds(xtime.tv_sec);
++
+ wall_to_monotonic.tv_sec -= sleep_length;
+ }
+ /* re-base the last cycle value */
+@@ -464,6 +483,7 @@ void update_wall_time(void)
+ clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
+ xtime.tv_sec++;
+ second_overflow();
++ update_xtime_seconds(xtime.tv_sec);
+ }
+
+ /* interpolator bits */
--- linux-2.6-utrace.patch DELETED ---
More information about the scm-commits
mailing list