diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index db49886bfae17271ee19709a984c57c208c5065b..020d1fff57dce7839735bf7df6362a57ff747b55 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq)
        BUG_ON(!work_pending(work));
 
        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
-       new |= work->management & WORK_STRUCT_FLAG_MASK;
-       work->management = new;
+       new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+       atomic_long_set(&work->data, new);
 }
 
 static inline void *get_wq_data(struct work_struct *work)
 {
-       return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+       return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
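[Note on the hunk above: the old open-coded `management` field is replaced by the atomic_long_t `data` word, which carries the flag bits in its low bits and the cpu_workqueue pointer above them. A minimal sketch of the header side of this change, assuming the 2.6.20-era <linux/workqueue.h> layout; the bit positions and mask values below are illustrative, not quoted from the header:

struct work_struct {
        atomic_long_t data;                       /* flag bits low, cwq pointer above them */
#define WORK_STRUCT_PENDING      0                /* work item is pending execution */
#define WORK_STRUCT_NOAUTOREL    1                /* work item is not auto-released on execution */
#define WORK_STRUCT_FLAG_MASK    (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
        struct list_head entry;
        work_func_t func;
};

/* Bit helpers such as test_and_set_bit() want an unsigned long *, so the
 * atomic_long_t is punned to one; this is the accessor used throughout the patch. */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
]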
@@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work
                list_del_init(&work->entry);
                spin_unlock_irqrestore(&cwq->lock, flags);
 
-               if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);
 
@@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
        int ret = 0, cpu = get_cpu();
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
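[Note on the hunk above: queue_work() claims the WORK_STRUCT_PENDING bit with test_and_set_bit() on the data word, so a work item that is already queued is not queued twice. A short caller sketch under the reworked API; my_wq, my_work and my_work_fn are hypothetical names:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
        /* runs later, in process context, on my_wq's kernel thread */
}

static int __init my_driver_init(void)
{
        my_wq = create_workqueue("my_wq");
        if (!my_wq)
                return -ENOMEM;

        INIT_WORK(&my_work, my_work_fn);
        queue_work(my_wq, &my_work);    /* returns 0 if the item was already pending */
        return 0;
}
]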
@@ -233,7 +233,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: delayable work to queue
+ * @dwork: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
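[Note: the hunks that follow operate on struct delayed_work, which bundles the work_struct with the timer they arm. A usage sketch, assuming the INIT_DELAYED_WORK initializer from the same API rework; the names are hypothetical:

#include <linux/workqueue.h>

static struct delayed_work my_dwork;

static void my_delayed_fn(struct work_struct *work)
{
        /* recover the container if the delayed_work itself is needed */
        struct delayed_work *dwork = container_of(work, struct delayed_work, work);

        (void)dwork;
}

static void my_start(struct workqueue_struct *wq)
{
        INIT_DELAYED_WORK(&my_dwork, my_delayed_fn);
        queue_delayed_work(wq, &my_dwork, HZ);  /* run roughly one second from now */
}
]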
@@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
        if (delay == 0)
                return queue_work(wq, work);
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * queue_delayed_work_on - queue work on specific CPU after delay
  * @cpu: CPU number to execute work on
  * @wq: workqueue to use
- * @work: work to queue
+ * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
@@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
@@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                spin_unlock_irqrestore(&cwq->lock, flags);
 
                BUG_ON(get_wq_data(work) != cwq);
-               if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);
 
@@ -637,9 +637,11 @@ int schedule_on_each_cpu(work_func_t func)
 
        mutex_lock(&workqueue_mutex);
        for_each_online_cpu(cpu) {
-               INIT_WORK(per_cpu_ptr(works, cpu), func);
-               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-                               per_cpu_ptr(works, cpu));
+               struct work_struct *work = per_cpu_ptr(works, cpu);
+
+               INIT_WORK(work, func);
+               set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        mutex_unlock(&workqueue_mutex);
        flush_workqueue(keventd_wq);
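[Note on the hunk above: schedule_on_each_cpu() bypasses queue_work() and calls __queue_work() directly, so after this change it must set WORK_STRUCT_PENDING itself; queue_work() is where that bit is normally claimed. A minimal caller sketch; both function names are hypothetical:

#include <linux/workqueue.h>

static void refresh_one_cpu(struct work_struct *unused)
{
        /* runs once on every online CPU, on that CPU's keventd thread */
}

static int refresh_all_cpus(void)
{
        /* blocks until the handler has finished on every CPU */
        return schedule_on_each_cpu(refresh_one_cpu);
}
]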
@@ -654,8 +656,7 @@ void flush_scheduled_work(void)
 EXPORT_SYMBOL(flush_scheduled_work);
 
 /**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed
- *                     work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
  * @dwork: the delayed work struct
  */
@@ -668,8 +669,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
 /**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd
- *                     work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
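[Note: these helpers exist for handlers that requeue themselves, where a plain cancel can lose the race with the rearm. A sketch of that pattern under the reworked API; the names are hypothetical:

#include <linux/workqueue.h>

static struct delayed_work poll_dwork;

static void poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = container_of(work, struct delayed_work, work);

        /* ... periodic work ... */
        schedule_delayed_work(dwork, HZ);       /* the handler rearms itself */
}

static void stop_polling(void)
{
        /* a bare cancel_delayed_work() can lose the race with the rearm in
         * poll_fn(); this helper keeps cancelling and flushing until the
         * work item is definitely gone */
        cancel_rearming_delayed_work(&poll_dwork);
}
]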