wrap access to thread_info

author     Roman Zippel <zippel@linux-m68k.org>
           Wed, 9 May 2007 09:35:16 +0000 (02:35 -0700)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Wed, 9 May 2007 19:30:56 +0000 (12:30 -0700)
Recently a few direct accesses to the thread_info in the task structure snuck
back, so this wraps them with the appropriate wrappers (task_thread_info() and
task_stack_page()).
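
For reference, the two accessors used throughout this patch are roughly the
following (a minimal sketch of their include/linux/sched.h definitions around
this series, after the task_struct field was renamed to "stack"; not a
verbatim copy):

	/* sketch: thread_info sits at the base of the task's kernel stack */
	#define task_thread_info(task)	((struct thread_info *)(task)->stack)
	#define task_stack_page(task)	((task)->stack)

Going through the accessors keeps knowledge of the stack/thread_info layout in
one place, so the underlying field can be renamed or retyped without touching
every caller again.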

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
13 files changed:
arch/avr32/kernel/process.c
arch/avr32/kernel/ptrace.c
arch/blackfin/kernel/asm-offsets.c
arch/i386/kernel/traps.c
arch/m68knommu/kernel/asm-offsets.c
arch/mips/kernel/smtc.c
arch/x86_64/kernel/irq.c
include/asm-i386/thread_info.h
include/asm-ia64/thread_info.h
include/asm-mips/system.h
include/asm-parisc/compat.h
include/asm-x86_64/thread_info.h
kernel/mutex.c

diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 4e4181ed1c6d52a70a5308a50aa4a2a7761099f0..13f988402613a038133f0e84289565210382600a 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -330,13 +330,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 {
        struct pt_regs *childregs;
 
-       childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1;
+       childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)task_stack_page(p))) - 1;
        *childregs = *regs;
 
        if (user_mode(regs))
                childregs->sp = usp;
        else
-               childregs->sp = (unsigned long)p->thread_info + THREAD_SIZE;
+               childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 
        childregs->r12 = 0; /* Set return value for child */
 
@@ -403,7 +403,7 @@ unsigned long get_wchan(struct task_struct *p)
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
 
-       stack_page = (unsigned long)p->thread_info;
+       stack_page = (unsigned long)task_stack_page(p);
        BUG_ON(!stack_page);
 
        /*
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 8ac74dddbbdee588b45bdf5598f3b229226fd32a..3c36c2d1614827894a4bfff8d823ce9bf194d1ba 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -24,7 +24,7 @@
 
 static struct pt_regs *get_user_regs(struct task_struct *tsk)
 {
-       return (struct pt_regs *)((unsigned long) tsk->thread_info +
+       return (struct pt_regs *)((unsigned long)task_stack_page(tsk) +
                                  THREAD_SIZE - sizeof(struct pt_regs));
 }
 
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index 41d9a9f897002f5536ecca2eeae0ad07b09d7cbd..e455f4504509518c9791163fd05d20d443434735 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -46,7 +46,7 @@ int main(void)
        DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
        DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
        DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-       DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+       DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
        DEFINE(TASK_MM, offsetof(struct task_struct, mm));
        DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
        DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 4bec0cbf407ae4f6e0f0d158578e28bad9e1ee77..c05e7e861b29404e9c226d05a33d2c712e5a13ee 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -305,7 +305,7 @@ void show_registers(struct pt_regs *regs)
               regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, current->pid,
-               current_thread_info(), current, current->thread_info);
+               current_thread_info(), current, task_thread_info(current));
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index b988c7bdc6e4d6fd78656b68b1e41fdea1131da0..7cd183d346ef6b06264f8f2b95a5ea154b001125 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -31,7 +31,7 @@ int main(void)
        DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
        DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
        DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-       DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+       DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
        DEFINE(TASK_MM, offsetof(struct task_struct, mm));
        DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
 
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 5dcfab6b288efbbca69046a3bc66970736d84cc8..b361edb83dc63e009e9a1a39281e9c132e946bfe 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -560,7 +560,7 @@ void smtc_boot_secondary(int cpu, struct task_struct *idle)
        write_tc_gpr_sp(__KSTK_TOS(idle));
 
        /* global pointer */
-       write_tc_gpr_gp((unsigned long)idle->thread_info);
+       write_tc_gpr_gp((unsigned long)task_thread_info(idle));
 
        smtc_status |= SMTC_MTC_ACTIVE;
        write_tc_c0_tchalt(0);
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 3bc30d2c13d3289809cd538f07cef4fc8033c0dc..3eaceac3248140ae85d6643dae9f61cb3a01a8a0 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -32,7 +32,7 @@ atomic_t irq_err_count;
  */
 static inline void stack_overflow_check(struct pt_regs *regs)
 {
-       u64 curbase = (u64) current->thread_info;
+       u64 curbase = (u64)task_stack_page(current);
        static unsigned long warned = -60*HZ;
 
        if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index bf01d4b342bd40978997695c6298f65b55232c1d..4cb0f91ae64f3dc01e7a945012fe6184be784fd4 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -172,7 +172,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TS_USEDFPU             0x0001  /* FPU was used by this task this quantum (SMP) */
 #define TS_POLLING             0x0002  /* True if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 91698599f91886f73bb591b6e0b12c84eaa1e2ac..d281475065856341d0a34b7413b5c8ba8c9556f4 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -110,6 +110,6 @@ struct thread_info {
 
 #define TS_POLLING             1       /* true if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 30f23a2b46ca0c5d32afd99bff1a52ff30254890..3713d256d36958d52d6ac750315f414c3610ff77 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -55,7 +55,7 @@ do {                                                                  \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        next->thread.emulated_fp = 0;                                   \
-       (last) = resume(prev, next, next->thread_info);                 \
+       (last) = resume(prev, next, task_thread_info(next));            \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
 } while(0)
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index fe8579023531d5a8e8788eea06c7f672f161fa36..11f4222597a067c1f9c41b60b222078d0bf4dfc9 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -152,7 +152,7 @@ static __inline__ void __user *compat_alloc_user_space(long len)
 
 static inline int __is_compat_task(struct task_struct *t)
 {
-       return test_ti_thread_flag(t->thread_info, TIF_32BIT);
+       return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
 }
 
 static inline int is_compat_task(void)
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 74a6c74397f72bee7619075dd1fba64d7d13e136..10bb5a8ed688889a68a99161021e9c0704d760d9 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -162,7 +162,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TS_COMPAT              0x0002  /* 32bit syscall active */
 #define TS_POLLING             0x0004  /* true if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
diff --git a/kernel/mutex.c b/kernel/mutex.c
index e7cbbb82765b4f66348e927fd2911ac4d9078078..303eab18484b1c63b7a678bf2b24a928cfc847fa 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -133,7 +133,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 
        debug_mutex_lock_common(lock, &waiter);
        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-       debug_mutex_add_waiter(lock, &waiter, task->thread_info);
+       debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
@@ -159,7 +159,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE &&
                                                signal_pending(task))) {
-                       mutex_remove_waiter(lock, &waiter, task->thread_info);
+                       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, _RET_IP_);
                        spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -175,8 +175,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
        }
 
        /* got the lock - rejoice! */
-       mutex_remove_waiter(lock, &waiter, task->thread_info);
-       debug_mutex_set_owner(lock, task->thread_info);
+       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+       debug_mutex_set_owner(lock, task_thread_info(task));
 
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))