/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;
        struct list_head prio_pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        /* reference counter for this struct */
        atomic_t refs;

        unsigned long sequence;

        /* protects the pending list. */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};

static int __btrfs_start_workers(struct btrfs_workers *workers);

/*
 * btrfs_start_workers uses kthread_create, which can block waiting for
 * memory for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
        struct btrfs_work work;
        struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
        struct worker_start *start;
        start = container_of(work, struct worker_start, work);
        __btrfs_start_workers(start->queue);
        kfree(start);
}

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;

                /* the list may be empty if the worker is just starting */
                if (!list_empty(&worker->worker_list) &&
                    !worker->workers->stopping) {
                        list_move(&worker->worker_list,
                                 &worker->workers->idle_list);
                }
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;

                if (!list_empty(&worker->worker_list) &&
                    !worker->workers->stopping) {
                        list_move_tail(&worker->worker_list,
                                      &worker->workers->worker_list);
                }
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

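/*
 * called by a worker thread between work items.  If someone asked for a
 * new worker while nobody could start one directly (atomic_start_pending
 * was set), queue a worker_start item on the helper queue so the new
 * thread gets created outside of this context.
 */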
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
        struct btrfs_workers *workers = worker->workers;
        struct worker_start *start;
        unsigned long flags;

        rmb();
        if (!workers->atomic_start_pending)
                return;

        start = kzalloc(sizeof(*start), GFP_NOFS);
        if (!start)
                return;

        start->work.func = start_new_worker_func;
        start->queue = workers;

        spin_lock_irqsave(&workers->lock, flags);
        if (!workers->atomic_start_pending)
                goto out;

        workers->atomic_start_pending = 0;
        if (workers->num_workers + workers->num_workers_starting >=
            workers->max_workers)
                goto out;

        workers->num_workers_starting += 1;
        spin_unlock_irqrestore(&workers->lock, flags);
        btrfs_queue_worker(workers->atomic_worker_start, &start->work);
        return;

out:
        kfree(start);
        spin_unlock_irqrestore(&workers->lock, flags);
}

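/*
 * for ordered queues, the ordered_func callbacks must run in the order
 * the work items were queued, not the order the worker threads happen
 * to finish them.  Mark this item done, then walk the order lists and
 * run ordered_func for each completed item at the head.
 */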
static noinline void run_ordered_completions(struct btrfs_workers *workers,
                                            struct btrfs_work *work)
{
        if (!workers->ordered)
                return;

        set_bit(WORK_DONE_BIT, &work->flags);

        spin_lock(&workers->order_lock);

        while (1) {
                if (!list_empty(&workers->prio_order_list)) {
                        work = list_entry(workers->prio_order_list.next,
                                          struct btrfs_work, order_list);
                } else if (!list_empty(&workers->order_list)) {
                        work = list_entry(workers->order_list.next,
                                          struct btrfs_work, order_list);
                } else {
                        break;
                }
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;

                /* we are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;

                spin_unlock(&workers->order_lock);

                work->ordered_func(work);

                /* now take the lock again and drop our item from the list */
                spin_lock(&workers->order_lock);
                list_del(&work->order_list);
                spin_unlock(&workers->order_lock);

                /*
                 * we don't want to call the ordered free functions
                 * with the lock held though
                 */
                work->ordered_free(work);
                spin_lock(&workers->order_lock);
        }

        spin_unlock(&workers->order_lock);
}

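/* drop a reference on a worker, freeing it once the last ref is gone */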
static void put_worker(struct btrfs_worker_thread *worker)
{
        if (atomic_dec_and_test(&worker->refs))
                kfree(worker);
}

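/*
 * called by a worker after a long idle period.  If the pool still has
 * more than one thread and this worker has nothing queued, take it off
 * the worker list and drop the list's reference.  Returns 1 if the
 * worker was freed and the caller must not touch it again.
 */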
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
        int freeit = 0;

        spin_lock_irq(&worker->lock);
        spin_lock(&worker->workers->lock);
        if (worker->workers->num_workers > 1 &&
            worker->idle &&
            !worker->working &&
            !list_empty(&worker->worker_list) &&
            list_empty(&worker->prio_pending) &&
            list_empty(&worker->pending) &&
            atomic_read(&worker->num_pending) == 0) {
                freeit = 1;
                list_del_init(&worker->worker_list);
                worker->workers->num_workers--;
        }
        spin_unlock(&worker->workers->lock);
        spin_unlock_irq(&worker->lock);

        if (freeit)
                put_worker(worker);
        return freeit;
}

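/*
 * grab the next work item for a worker.  prio_head and head are local
 * lists owned by the worker thread; when they run dry we splice the
 * worker's shared pending lists onto them under worker->lock.  High
 * priority items are always served before regular ones.
 */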
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
                                        struct list_head *prio_head,
                                        struct list_head *head)
{
        struct btrfs_work *work = NULL;
        struct list_head *cur = NULL;

        if (!list_empty(prio_head))
                cur = prio_head->next;

        smp_mb();
        if (!list_empty(&worker->prio_pending))
                goto refill;

        if (!list_empty(head))
                cur = head->next;

        if (cur)
                goto out;

refill:
        spin_lock_irq(&worker->lock);
        list_splice_tail_init(&worker->prio_pending, prio_head);
        list_splice_tail_init(&worker->pending, head);

        if (!list_empty(prio_head))
                cur = prio_head->next;
        else if (!list_empty(head))
                cur = head->next;
        spin_unlock_irq(&worker->lock);

        if (!cur)
                goto out_fail;

out:
        work = list_entry(cur, struct btrfs_work, list);

out_fail:
        return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head head;
        struct list_head prio_head;
        struct btrfs_work *work;

        INIT_LIST_HEAD(&head);
        INIT_LIST_HEAD(&prio_head);

        do {
again:
                while (1) {
                        work = get_next_work(worker, &prio_head, &head);
                        if (!work)
                                break;

                        list_del(&work->list);
                        clear_bit(WORK_QUEUED_BIT, &work->flags);

                        work->worker = worker;

                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        /*
                         * unless this is an ordered work queue,
                         * 'work' was probably freed by func above.
                         */
                        run_ordered_completions(worker->workers, work);

                        check_pending_worker_creates(worker);
                        cond_resched();
                }

                spin_lock_irq(&worker->lock);
                check_idle_worker(worker);

                if (freezing(current)) {
                        worker->working = 0;
                        spin_unlock_irq(&worker->lock);
                        try_to_freeze();
                } else {
                        spin_unlock_irq(&worker->lock);
                        if (!kthread_should_stop()) {
                                cpu_relax();
                                /*
                                 * we've dropped the lock, did someone else
                                 * jump in?
                                 */
                                smp_mb();
                                if (!list_empty(&worker->pending) ||
                                    !list_empty(&worker->prio_pending))
                                        continue;

                                /*
                                 * this short schedule allows more work to
                                 * come in without the queue functions
                                 * needing to go through wake_up_process()
                                 *
                                 * worker->working is still 1, so nobody
                                 * is going to try and wake us up
                                 */
                                schedule_timeout(1);
                                smp_mb();
                                if (!list_empty(&worker->pending) ||
                                    !list_empty(&worker->prio_pending))
                                        continue;

                                if (kthread_should_stop())
                                        break;

                                /* still no more work? sleep for real */
                                spin_lock_irq(&worker->lock);
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (!list_empty(&worker->pending) ||
                                    !list_empty(&worker->prio_pending)) {
                                        spin_unlock_irq(&worker->lock);
                                        set_current_state(TASK_RUNNING);
                                        goto again;
                                }

                                /*
                                 * this makes sure we get a wakeup when someone
                                 * adds something new to the queue
                                 */
                                worker->working = 0;
                                spin_unlock_irq(&worker->lock);

                                if (!kthread_should_stop()) {
                                        schedule_timeout(HZ * 120);
                                        if (!worker->working &&
                                            try_worker_shutdown(worker)) {
                                                return 0;
                                        }
                                }
                        }
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;
        int can_stop;

        spin_lock_irq(&workers->lock);
        workers->stopping = 1;
        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);

                atomic_inc(&worker->refs);
                workers->num_workers -= 1;
                if (!list_empty(&worker->worker_list)) {
                        list_del_init(&worker->worker_list);
                        put_worker(worker);
                        can_stop = 1;
                } else
                        can_stop = 0;
                spin_unlock_irq(&workers->lock);
                if (can_stop)
                        kthread_stop(worker->task);
                spin_lock_irq(&workers->lock);
                put_worker(worker);
        }
        spin_unlock_irq(&workers->lock);
}

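/*
 * typical lifecycle of a pool, as a sketch (my_func and work belong to
 * the caller; the "worker" name and the max count of 8 are arbitrary):
 *
 *	struct btrfs_workers workers;
 *
 *	btrfs_init_workers(&workers, "worker", 8, NULL);
 *	btrfs_start_workers(&workers);
 *
 *	work->func = my_func;
 *	btrfs_queue_worker(&workers, work);
 *
 *	btrfs_stop_workers(&workers);
 */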
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
                        struct btrfs_workers *async_helper)
{
        workers->num_workers = 0;
        workers->num_workers_starting = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        INIT_LIST_HEAD(&workers->order_list);
        INIT_LIST_HEAD(&workers->prio_order_list);
        spin_lock_init(&workers->lock);
        spin_lock_init(&workers->order_lock);
        workers->max_workers = max;
        workers->idle_thresh = 32;
        workers->name = name;
        workers->ordered = 0;
        workers->atomic_start_pending = 0;
        workers->atomic_worker_start = async_helper;
        workers->stopping = 0;
}

/*
 * starts a new worker thread.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;

        worker = kzalloc(sizeof(*worker), GFP_NOFS);
        if (!worker) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_LIST_HEAD(&worker->pending);
        INIT_LIST_HEAD(&worker->prio_pending);
        INIT_LIST_HEAD(&worker->worker_list);
        spin_lock_init(&worker->lock);

        atomic_set(&worker->num_pending, 0);
        atomic_set(&worker->refs, 1);
        worker->workers = workers;
        worker->task = kthread_create(worker_loop, worker,
                                      "btrfs-%s-%d", workers->name,
                                      workers->num_workers + 1);
        if (IS_ERR(worker->task)) {
                ret = PTR_ERR(worker->task);
                goto fail;
        }

        spin_lock_irq(&workers->lock);
        if (workers->stopping) {
                spin_unlock_irq(&workers->lock);
                goto fail_kthread;
        }
        list_add_tail(&worker->worker_list, &workers->idle_list);
        worker->idle = 1;
        workers->num_workers++;
        workers->num_workers_starting--;
        WARN_ON(workers->num_workers_starting < 0);
        spin_unlock_irq(&workers->lock);

        wake_up_process(worker->task);
        return 0;

fail_kthread:
        kthread_stop(worker->task);
fail:
        kfree(worker);
        spin_lock_irq(&workers->lock);
        workers->num_workers_starting--;
        spin_unlock_irq(&workers->lock);
        return ret;
}

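/*
 * public entry point: account for the thread we are about to start in
 * num_workers_starting, then actually create it.
 */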
int btrfs_start_workers(struct btrfs_workers *workers)
{
        spin_lock_irq(&workers->lock);
        workers->num_workers_starting++;
        spin_unlock_irq(&workers->lock);
        return __btrfs_start_workers(workers);
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min;

        enforce_min = (workers->num_workers + workers->num_workers_starting) <
                workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * working
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced.
         * Do the move in batches based on the sequence number.  This groups
         * requests submitted at roughly the same time onto the same worker.
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        worker->sequence++;

        if (worker->sequence % workers->idle_thresh == 0)
                list_move_tail(next, &workers->worker_list);
        return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        struct list_head *fallback;
        int ret;

        spin_lock_irqsave(&workers->lock, flags);
again:
        worker = next_worker(workers);

        if (!worker) {
                if (workers->num_workers + workers->num_workers_starting >=
                    workers->max_workers) {
                        goto fallback;
                } else if (workers->atomic_worker_start) {
                        workers->atomic_start_pending = 1;
                        goto fallback;
                } else {
                        workers->num_workers_starting++;
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        ret = __btrfs_start_workers(workers);
                        spin_lock_irqsave(&workers->lock, flags);
                        if (ret)
                                goto fallback;
                        goto again;
                }
        }
        goto found;

fallback:
        fallback = NULL;
        /*
         * we have failed to find any workers, just
         * return the first one we can find.
         */
        if (!list_empty(&workers->worker_list))
                fallback = workers->worker_list.next;
        if (!list_empty(&workers->idle_list))
                fallback = workers->idle_list.next;
        BUG_ON(!fallback);
        worker = list_entry(fallback,
                  struct btrfs_worker_thread, worker_list);
found:
        /*
         * this makes sure the worker doesn't exit before it is placed
         * onto a busy/idle list
         */
        atomic_inc(&worker->num_pending);
        spin_unlock_irqrestore(&workers->lock, flags);
        return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the CPU up for others.
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;
        int wake = 0;

        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
                return;

        spin_lock_irqsave(&worker->lock, flags);
        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
                list_add_tail(&work->list, &worker->prio_pending);
        else
                list_add_tail(&work->list, &worker->pending);
        atomic_inc(&worker->num_pending);

        /* by definition we're busy, take ourselves off the idle
         * list
         */
        if (worker->idle) {
                spin_lock(&worker->workers->lock);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                              &worker->workers->worker_list);
                spin_unlock(&worker->workers->lock);
        }
        if (!worker->working) {
                wake = 1;
                worker->working = 1;
        }

        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);
}

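/*
 * flag a work item as high priority so it lands on the prio lists.
 * Must be set before the work is queued; the bit is only checked at
 * queue time.
 */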
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
        set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
                return;

        worker = find_worker(workers);
        if (workers->ordered) {
                /*
                 * you're not allowed to do ordered queues from an
                 * interrupt handler
                 */
                spin_lock(&workers->order_lock);
                if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
                        list_add_tail(&work->order_list,
                                      &workers->prio_order_list);
                } else {
                        list_add_tail(&work->order_list, &workers->order_list);
                }
                spin_unlock(&workers->order_lock);
        } else {
                INIT_LIST_HEAD(&work->order_list);
        }

        spin_lock_irqsave(&worker->lock, flags);

        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
                list_add_tail(&work->list, &worker->prio_pending);
        else
                list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);
}