Merge tag 'for-linus-4.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-drm-fsl-dcu.git] / kernel / sched / cputime.c
index 851b00f344ae27cdb670378be9809c0400790bb1..b2ab2ffb1adc021de289d3e3c857de60ad2428b6 100644 (file)
@@ -469,7 +469,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();
 
-       if (vtime_accounting_enabled())
+       if (vtime_accounting_cpu_enabled())
                return;
 
        if (sched_clock_irqtime) {
@@ -683,7 +683,7 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
 {
        unsigned long long delta = vtime_delta(tsk);
 
-       WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
+       WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
        tsk->vtime_snap += delta;
 
        /* CHECKME: always safe to convert nsecs to cputime? */
@@ -699,37 +699,37 @@ static void __vtime_account_system(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        if (context_tracking_in_user())
                tsk->vtime_snap_whence = VTIME_USER;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_account_user(struct task_struct *tsk)
 {
        cputime_t delta_cpu;
 
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        delta_cpu = get_vtime_delta(tsk);
        tsk->vtime_snap_whence = VTIME_SYS;
        account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_user_enter(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        tsk->vtime_snap_whence = VTIME_USER;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_guest_enter(struct task_struct *tsk)
@@ -741,19 +741,19 @@ void vtime_guest_enter(struct task_struct *tsk)
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
 
 void vtime_guest_exit(struct task_struct *tsk)
 {
-       write_seqlock(&tsk->vtime_seqlock);
+       write_seqcount_begin(&tsk->vtime_seqcount);
        __vtime_account_system(tsk);
        current->flags &= ~PF_VCPU;
-       write_sequnlock(&tsk->vtime_seqlock);
+       write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
@@ -766,24 +766,26 @@ void vtime_account_idle(struct task_struct *tsk)
 
 void arch_vtime_task_switch(struct task_struct *prev)
 {
-       write_seqlock(&prev->vtime_seqlock);
-       prev->vtime_snap_whence = VTIME_SLEEPING;
-       write_sequnlock(&prev->vtime_seqlock);
+       write_seqcount_begin(&prev->vtime_seqcount);
+       prev->vtime_snap_whence = VTIME_INACTIVE;
+       write_seqcount_end(&prev->vtime_seqcount);
 
-       write_seqlock(&current->vtime_seqlock);
+       write_seqcount_begin(&current->vtime_seqcount);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = sched_clock_cpu(smp_processor_id());
-       write_sequnlock(&current->vtime_seqlock);
+       write_seqcount_end(&current->vtime_seqcount);
 }
 
 void vtime_init_idle(struct task_struct *t, int cpu)
 {
        unsigned long flags;
 
-       write_seqlock_irqsave(&t->vtime_seqlock, flags);
+       local_irq_save(flags);
+       write_seqcount_begin(&t->vtime_seqcount);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = sched_clock_cpu(cpu);
-       write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+       write_seqcount_end(&t->vtime_seqcount);
+       local_irq_restore(flags);
 }
 
 cputime_t task_gtime(struct task_struct *t)
@@ -791,17 +793,17 @@ cputime_t task_gtime(struct task_struct *t)
        unsigned int seq;
        cputime_t gtime;
 
-       if (!context_tracking_is_enabled())
+       if (!vtime_accounting_enabled())
                return t->gtime;
 
        do {
-               seq = read_seqbegin(&t->vtime_seqlock);
+               seq = read_seqcount_begin(&t->vtime_seqcount);
 
                gtime = t->gtime;
-               if (t->flags & PF_VCPU)
+               if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
                        gtime += vtime_delta(t);
 
-       } while (read_seqretry(&t->vtime_seqlock, seq));
+       } while (read_seqcount_retry(&t->vtime_seqcount, seq));
 
        return gtime;
 }
@@ -824,7 +826,7 @@ fetch_task_cputime(struct task_struct *t,
                *udelta = 0;
                *sdelta = 0;
 
-               seq = read_seqbegin(&t->vtime_seqlock);
+               seq = read_seqcount_begin(&t->vtime_seqcount);
 
                if (u_dst)
                        *u_dst = *u_src;
@@ -832,7 +834,7 @@ fetch_task_cputime(struct task_struct *t,
                        *s_dst = *s_src;
 
                /* Task is sleeping, nothing to add */
-               if (t->vtime_snap_whence == VTIME_SLEEPING ||
+               if (t->vtime_snap_whence == VTIME_INACTIVE ||
                    is_idle_task(t))
                        continue;
 
@@ -848,7 +850,7 @@ fetch_task_cputime(struct task_struct *t,
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
-       } while (read_seqretry(&t->vtime_seqlock, seq));
+       } while (read_seqcount_retry(&t->vtime_seqcount, seq));
 }
 
 
@@ -856,6 +858,14 @@ void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
 {
        cputime_t udelta, sdelta;
 
+       if (!vtime_accounting_enabled()) {
+               if (utime)
+                       *utime = t->utime;
+               if (stime)
+                       *stime = t->stime;
+               return;
+       }
+
        fetch_task_cputime(t, utime, stime, &t->utime,
                           &t->stime, &udelta, &sdelta);
        if (utime)
@@ -869,6 +879,14 @@ void task_cputime_scaled(struct task_struct *t,
 {
        cputime_t udelta, sdelta;
 
+       if (!vtime_accounting_enabled()) {
+               if (utimescaled)
+                       *utimescaled = t->utimescaled;
+               if (stimescaled)
+                       *stimescaled = t->stimescaled;
+               return;
+       }
+
        fetch_task_cputime(t, utimescaled, stimescaled,
                           &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
        if (utimescaled)
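
A note on the pattern: the changes above replace the task's vtime_seqlock
(a seqlock_t) with a bare vtime_seqcount (seqcount_t). A raw seqcount provides
no writer serialization of its own, so the write sides here rely on running in
the task's own context, or with interrupts disabled as in vtime_init_idle(),
which is also why the old write_seqlock_irqsave() becomes an explicit
local_irq_save() around write_seqcount_begin()/write_seqcount_end(): the
seqcount API has no irqsave variant. The read sides (task_gtime(),
fetch_task_cputime()) stay lockless and simply retry if they raced with a
writer. A minimal, illustrative sketch of that pattern follows; the demo_*
names are hypothetical and not part of this diff:

	#include <linux/seqlock.h>
	#include <linux/types.h>

	/* Illustrative only: one value sampled the way task_gtime() samples gtime. */
	static seqcount_t demo_seqcount = SEQCNT_ZERO(demo_seqcount);
	static u64 demo_snap;

	/*
	 * Write side: callers must already be serialized (e.g. the owning task,
	 * or interrupts disabled), since seqcount_t has no internal lock.
	 */
	static void demo_update(u64 now)
	{
		write_seqcount_begin(&demo_seqcount);
		demo_snap = now;
		write_seqcount_end(&demo_seqcount);
	}

	/* Read side: lockless; retries until it observes an unchanged sequence. */
	static u64 demo_read(void)
	{
		unsigned int seq;
		u64 snap;

		do {
			seq = read_seqcount_begin(&demo_seqcount);
			snap = demo_snap;
		} while (read_seqcount_retry(&demo_seqcount, seq));

		return snap;
	}

Since the writers were already serialized, the spinlock half of the old
seqlock added nothing; dropping down to a seqcount removes that overhead from
the context-switch and kernel/user transition paths while keeping the lockless
readers correct.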