MIPS: Get rid of finish_arch_switch().
author	Ralf Baechle <ralf@linux-mips.org>
Wed, 29 Jul 2015 10:14:42 +0000 (12:14 +0200)
committer	Ralf Baechle <ralf@linux-mips.org>
Thu, 3 Sep 2015 10:08:01 +0000 (12:08 +0200)
MIPS was using finish_arch_switch() as a hook to restore and initialize
CPU context for all threads, even newly created kernel and user threads.
This is, however, entirely solvable within switch_to(), so get rid of
finish_arch_switch(), which is in the way of scheduler cleanups.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
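
To restate the idea outside the kernel: once switch_to() both saves prev's
coprocessor/DSP state and restores next's, there is nothing left for a
post-switch hook to do. The user-space sketch below only models that shape;
struct task, hw_cop2 and the simplified switch_to() function are invented for
illustration and are not the kernel's API.

    #include <stdio.h>

    struct task { int cop2; };   /* per-task saved coprocessor state (model only) */
    static int hw_cop2;          /* the single "hardware" register in this model  */

    /* Save of prev and restore of next both happen here, keyed off the two
     * task pointers, so no separate finish_arch_switch() hook is needed. */
    static void switch_to(struct task *prev, struct task *next)
    {
            prev->cop2 = hw_cop2;    /* save outgoing task's state    */
            hw_cop2 = next->cop2;    /* restore incoming task's state */
            /* resume() would do the low-level register/stack switch here */
    }

    int main(void)
    {
            struct task a = { .cop2 = 1 }, b = { .cop2 = 2 };

            hw_cop2 = a.cop2;
            switch_to(&a, &b);
            printf("hw_cop2 = %d, b's state, restored with no post-switch hook\n",
                   hw_cop2);
            return 0;
    }
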
arch/mips/include/asm/switch_to.h

index 7163cd7fdd69a622892e4be83acbe0450e8f2af0..9733cd0266e4b948879e9ec924622c606b59f770 100644
@@ -83,45 +83,43 @@ do {        if (cpu_has_rw_llb) {                                           \
        }                                                               \
 } while (0)
 
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
-       u32 __c0_stat;                                                  \
        s32 __fpsave = FP_SAVE_NONE;                                    \
        __mips_mt_fpaff_switch_to(prev);                                \
-       if (cpu_has_dsp)                                                \
+       if (cpu_has_dsp) {                                              \
                __save_dsp(prev);                                       \
-       if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {            \
-               if (cop2_lazy_restore)                                  \
-                       KSTK_STATUS(prev) &= ~ST0_CU2;                  \
-               __c0_stat = read_c0_status();                           \
-               write_c0_status(__c0_stat | ST0_CU2);                   \
-               cop2_save(prev);                                        \
-               write_c0_status(__c0_stat & ~ST0_CU2);                  \
+               __restore_dsp(next);                                    \
+       }                                                               \
+       if (cop2_present) {                                             \
+               set_c0_status(ST0_CU2);                                 \
+               if ((KSTK_STATUS(prev) & ST0_CU2)) {                    \
+                       if (cop2_lazy_restore)                          \
+                               KSTK_STATUS(prev) &= ~ST0_CU2;          \
+                       cop2_save(prev);                                \
+               }                                                       \
+               if (KSTK_STATUS(next) & ST0_CU2 &&                      \
+                   !cop2_lazy_restore) {                               \
+                       cop2_restore(next);                             \
+               }                                                       \
+               clear_c0_status(ST0_CU2);                               \
        }                                                               \
        __clear_software_ll_bit();                                      \
        if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))          \
                __fpsave = FP_SAVE_SCALAR;                              \
        if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))          \
                __fpsave = FP_SAVE_VECTOR;                              \
-       (last) = resume(prev, next, task_thread_info(next), __fpsave);  \
-} while (0)
-
-#define finish_arch_switch(prev)                                       \
-do {                                                                   \
-       u32 __c0_stat;                                                  \
-       if (cop2_present && !cop2_lazy_restore &&                       \
-                       (KSTK_STATUS(current) & ST0_CU2)) {             \
-               __c0_stat = read_c0_status();                           \
-               write_c0_status(__c0_stat | ST0_CU2);                   \
-               cop2_restore(current);                                  \
-               write_c0_status(__c0_stat & ~ST0_CU2);                  \
-       }                                                               \
-       if (cpu_has_dsp)                                                \
-               __restore_dsp(current);                                 \
        if (cpu_has_userlocal)                                          \
-               write_c0_userlocal(current_thread_info()->tp_value);    \
+               write_c0_userlocal(task_thread_info(next)->tp_value);   \
        __restore_watch();                                              \
        disable_msa();                                                  \
+       (last) = resume(prev, next, task_thread_info(next), __fpsave);  \
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
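
On the new comment about ret_from_kernel_thread/ret_from_fork: a newly created
thread starts executing at its own entry point, so code placed after resume()
in switch_to() never runs on that thread's first activation. The same effect
can be modelled in user space with ucontext(3); the sketch below is
illustrative only, uses no kernel interfaces, and the names sched_ctx and
new_task_entry are made up for it.

    #include <stdio.h>
    #include <ucontext.h>

    static ucontext_t sched_ctx, new_task_ctx;
    static char new_task_stack[64 * 1024];

    /* Models ret_from_kernel_thread/ret_from_fork: the first code a
     * brand-new thread runs when it is switched to for the first time. */
    static void new_task_entry(void)
    {
            printf("new task: first run starts here, not after the switch call\n");
            /* returning resumes sched_ctx via uc_link */
    }

    int main(void)
    {
            getcontext(&new_task_ctx);
            new_task_ctx.uc_stack.ss_sp = new_task_stack;
            new_task_ctx.uc_stack.ss_size = sizeof(new_task_stack);
            new_task_ctx.uc_link = &sched_ctx;
            makecontext(&new_task_ctx, new_task_entry, 0);

            /* Everything the new task must see has to be set up before the
             * switch, since it will not execute the code that follows it. */
            printf("scheduler: preparing next's context before the switch\n");

            swapcontext(&sched_ctx, &new_task_ctx);

            /* Only reached when control comes back to this saved context
             * (here via uc_link); the new task itself never runs this line. */
            printf("scheduler: resumed after the new task finished\n");
            return 0;
    }
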