kernel/rcu/tree_plugin.h
1 /*
2  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3  * Internal non-public definitions that provide either classic
4  * or preemptible semantics.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  * Copyright Red Hat, 2009
21  * Copyright IBM Corporation, 2009
22  *
23  * Author: Ingo Molnar <mingo@elte.hu>
24  *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25  */
26
27 #include <linux/delay.h>
28 #include <linux/gfp.h>
29 #include <linux/oom.h>
30 #include <linux/smpboot.h>
31 #include "../time/tick-internal.h"
32
33 #ifdef CONFIG_RCU_BOOST
34
35 #include "../locking/rtmutex_common.h"
36
37 /*
38  * Control variables for per-CPU and per-rcu_node kthreads.  These
39  * handle all flavors of RCU.
40  */
41 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
42 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
43 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
44 DEFINE_PER_CPU(char, rcu_cpu_has_work);
45
46 #else /* #ifdef CONFIG_RCU_BOOST */
47
48 /*
49  * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
50  * all uses are in dead code.  Provide a definition to keep the compiler
51  * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
52  * This probably needs to be excluded from -rt builds.
53  */
54 #define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
55
56 #endif /* #else #ifdef CONFIG_RCU_BOOST */
57
58 #ifdef CONFIG_RCU_NOCB_CPU
59 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
60 static bool have_rcu_nocb_mask;     /* Was rcu_nocb_mask allocated? */
61 static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
62 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
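/*
 * Editorial note (illustration, not part of the original file): the
 * variables above are normally populated from kernel boot parameters.
 * For example, a command line containing
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * offloads callback invocation for CPUs 1-7 to the rcuo kthreads and
 * asks those kthreads to poll rather than wait to be awakened.
 */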
63
64 /*
65  * Check the RCU kernel configuration parameters and print informative
66  * messages about anything out of the ordinary.  If you like #ifdef, you
67  * will love this function.
68  */
69 static void __init rcu_bootup_announce_oddness(void)
70 {
71         if (IS_ENABLED(CONFIG_RCU_TRACE))
72                 pr_info("\tRCU debugfs-based tracing is enabled.\n");
73         if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
74             (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
75                 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
76                        RCU_FANOUT);
77         if (rcu_fanout_exact)
78                 pr_info("\tHierarchical RCU autobalancing is disabled.\n");
79         if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
80                 pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
81         if (IS_ENABLED(CONFIG_PROVE_RCU))
82                 pr_info("\tRCU lockdep checking is enabled.\n");
83         if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
84                 pr_info("\tRCU torture testing starts during boot.\n");
85         if (IS_ENABLED(CONFIG_RCU_CPU_STALL_INFO))
86                 pr_info("\tAdditional per-CPU info printed with stalls.\n");
87         if (NUM_RCU_LVL_4 != 0)
88                 pr_info("\tFour-level hierarchy is enabled.\n");
89         if (RCU_FANOUT_LEAF != 16)
90                 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
91                         RCU_FANOUT_LEAF);
92         if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
93                 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
94         if (nr_cpu_ids != NR_CPUS)
95                 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
96         if (IS_ENABLED(CONFIG_RCU_BOOST))
97                 pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
98 }
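/*
 * Editorial example (not part of the original file): booting with
 *
 *	rcutree.rcu_fanout_leaf=8
 *
 * overrides the build-time RCU_FANOUT_LEAF value, so the function above
 * would emit its "Boot-time adjustment of leaf fanout to 8." message
 * during boot.
 */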
99
100 #ifdef CONFIG_PREEMPT_RCU
101
102 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
103 static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
104 static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
105
106 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
107 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
108                                bool wake);
109
110 /*
111  * Tell them what RCU they are running.
112  */
113 static void __init rcu_bootup_announce(void)
114 {
115         pr_info("Preemptible hierarchical RCU implementation.\n");
116         rcu_bootup_announce_oddness();
117 }
118
119 /*
120  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
121  * that this just means that the task currently running on the CPU is not
122  * in an RCU read-side critical section.  There might be any number of
123  * tasks blocked while in an RCU read-side critical section.
124  *
125  * As with the other rcu_*_qs() functions, callers to this function
126  * must disable preemption.
127  */
128 static void rcu_preempt_qs(void)
129 {
130         if (!__this_cpu_read(rcu_data_p->passed_quiesce)) {
131                 trace_rcu_grace_period(TPS("rcu_preempt"),
132                                        __this_cpu_read(rcu_data_p->gpnum),
133                                        TPS("cpuqs"));
134                 __this_cpu_write(rcu_data_p->passed_quiesce, 1);
135                 barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
136                 current->rcu_read_unlock_special.b.need_qs = false;
137         }
138 }
139
140 /*
141  * We have entered the scheduler, and the current task might soon be
142  * context-switched away from.  If this task is in an RCU read-side
143  * critical section, we will no longer be able to rely on the CPU to
144  * record that fact, so we enqueue the task on the blkd_tasks list.
145  * The task will dequeue itself when it exits the outermost enclosing
146  * RCU read-side critical section.  Therefore, the current grace period
147  * cannot be permitted to complete until the blkd_tasks list entries
148  * predating the current grace period drain, in other words, until
149  * rnp->gp_tasks becomes NULL.
150  *
151  * Caller must disable preemption.
152  */
153 static void rcu_preempt_note_context_switch(void)
154 {
155         struct task_struct *t = current;
156         unsigned long flags;
157         struct rcu_data *rdp;
158         struct rcu_node *rnp;
159
160         if (t->rcu_read_lock_nesting > 0 &&
161             !t->rcu_read_unlock_special.b.blocked) {
162
163                 /* Possibly blocking in an RCU read-side critical section. */
164                 rdp = this_cpu_ptr(rcu_state_p->rda);
165                 rnp = rdp->mynode;
166                 raw_spin_lock_irqsave(&rnp->lock, flags);
167                 smp_mb__after_unlock_lock();
168                 t->rcu_read_unlock_special.b.blocked = true;
169                 t->rcu_blocked_node = rnp;
170
171                 /*
172                  * If this CPU has already checked in, then this task
173                  * will hold up the next grace period rather than the
174                  * current grace period.  Queue the task accordingly.
175                  * If the task is queued for the current grace period
176                  * (i.e., this CPU has not yet passed through a quiescent
177                  * state for the current grace period), then as long
178                  * as that task remains queued, the current grace period
179                  * cannot end.  Note that there is some uncertainty as
180                  * to exactly when the current grace period started.
181                  * We take a conservative approach, which can result
182                  * in unnecessarily waiting on tasks that started very
183                  * slightly after the current grace period began.  C'est
184                  * la vie!!!
185                  *
186                  * But first, note that the current CPU must still be
187                  * on line!
188                  */
189                 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
190                 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
191                 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
192                         list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
193                         rnp->gp_tasks = &t->rcu_node_entry;
194                         if (IS_ENABLED(CONFIG_RCU_BOOST) &&
195                             rnp->boost_tasks != NULL)
196                                 rnp->boost_tasks = rnp->gp_tasks;
197                 } else {
198                         list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
199                         if (rnp->qsmask & rdp->grpmask)
200                                 rnp->gp_tasks = &t->rcu_node_entry;
201                 }
202                 trace_rcu_preempt_task(rdp->rsp->name,
203                                        t->pid,
204                                        (rnp->qsmask & rdp->grpmask)
205                                        ? rnp->gpnum
206                                        : rnp->gpnum + 1);
207                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
208         } else if (t->rcu_read_lock_nesting < 0 &&
209                    t->rcu_read_unlock_special.s) {
210
211                 /*
212                  * Complete exit from RCU read-side critical section on
213                  * behalf of preempted instance of __rcu_read_unlock().
214                  */
215                 rcu_read_unlock_special(t);
216         }
217
218         /*
219          * Either we were not in an RCU read-side critical section to
220          * begin with, or we have now recorded that critical section
221          * globally.  Either way, we can now note a quiescent state
222          * for this CPU.  Again, if we were in an RCU read-side critical
223          * section, and if that critical section was blocking the current
224          * grace period, then the fact that the task has been enqueued
225          * means that we continue to block the current grace period.
226          */
227         rcu_preempt_qs();
228 }
229
230 /*
231  * Check for preempted RCU readers blocking the current grace period
232  * for the specified rcu_node structure.  If the caller needs a reliable
233  * answer, it must hold the rcu_node's ->lock.
234  */
235 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
236 {
237         return rnp->gp_tasks != NULL;
238 }
239
240 /*
241  * Advance a ->blkd_tasks-list pointer to the next entry, instead
242  * returning NULL if at the end of the list.
243  */
244 static struct list_head *rcu_next_node_entry(struct task_struct *t,
245                                              struct rcu_node *rnp)
246 {
247         struct list_head *np;
248
249         np = t->rcu_node_entry.next;
250         if (np == &rnp->blkd_tasks)
251                 np = NULL;
252         return np;
253 }
254
255 /*
256  * Return true if the specified rcu_node structure has tasks that were
257  * preempted within an RCU read-side critical section.
258  */
259 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
260 {
261         return !list_empty(&rnp->blkd_tasks);
262 }
263
264 /*
265  * Handle special cases during rcu_read_unlock(), such as needing to
266  * notify RCU core processing or task having blocked during the RCU
267  * read-side critical section.
268  */
269 void rcu_read_unlock_special(struct task_struct *t)
270 {
271         bool empty_exp;
272         bool empty_norm;
273         bool empty_exp_now;
274         unsigned long flags;
275         struct list_head *np;
276         bool drop_boost_mutex = false;
277         struct rcu_node *rnp;
278         union rcu_special special;
279
280         /* NMI handlers cannot block and cannot safely manipulate state. */
281         if (in_nmi())
282                 return;
283
284         local_irq_save(flags);
285
286         /*
287          * If RCU core is waiting for this CPU to exit critical section,
288          * let it know that we have done so.  Because irqs are disabled,
289          * t->rcu_read_unlock_special cannot change.
290          */
291         special = t->rcu_read_unlock_special;
292         if (special.b.need_qs) {
293                 rcu_preempt_qs();
294                 t->rcu_read_unlock_special.b.need_qs = false;
295                 if (!t->rcu_read_unlock_special.s) {
296                         local_irq_restore(flags);
297                         return;
298                 }
299         }
300
301         /* Hardware IRQ handlers cannot block, complain if they get here. */
302         if (in_irq() || in_serving_softirq()) {
303                 lockdep_rcu_suspicious(__FILE__, __LINE__,
304                                        "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
305                 pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
306                          t->rcu_read_unlock_special.s,
307                          t->rcu_read_unlock_special.b.blocked,
308                          t->rcu_read_unlock_special.b.need_qs);
309                 local_irq_restore(flags);
310                 return;
311         }
312
313         /* Clean up if blocked during RCU read-side critical section. */
314         if (special.b.blocked) {
315                 t->rcu_read_unlock_special.b.blocked = false;
316
317                 /*
318                  * Remove this task from the list it blocked on.  The task
319                  * now remains queued on the rcu_node corresponding to
320                  * the CPU it first blocked on, so the first attempt to
321                  * acquire the task's rcu_node's ->lock will succeed.
322                  * Keep the loop and add a WARN_ON() out of sheer paranoia.
323                  */
324                 for (;;) {
325                         rnp = t->rcu_blocked_node;
326                         raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
327                         smp_mb__after_unlock_lock();
328                         if (rnp == t->rcu_blocked_node)
329                                 break;
330                         WARN_ON_ONCE(1);
331                         raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
332                 }
333                 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
334                 empty_exp = !rcu_preempted_readers_exp(rnp);
335                 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
336                 np = rcu_next_node_entry(t, rnp);
337                 list_del_init(&t->rcu_node_entry);
338                 t->rcu_blocked_node = NULL;
339                 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
340                                                 rnp->gpnum, t->pid);
341                 if (&t->rcu_node_entry == rnp->gp_tasks)
342                         rnp->gp_tasks = np;
343                 if (&t->rcu_node_entry == rnp->exp_tasks)
344                         rnp->exp_tasks = np;
345                 if (IS_ENABLED(CONFIG_RCU_BOOST)) {
346                         if (&t->rcu_node_entry == rnp->boost_tasks)
347                                 rnp->boost_tasks = np;
348                         /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
349                         drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
350                 }
351
352                 /*
353                  * If this was the last task on the current list, and if
354                  * we aren't waiting on any CPUs, report the quiescent state.
355                  * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
356                  * so we must take a snapshot of the expedited state.
357                  */
358                 empty_exp_now = !rcu_preempted_readers_exp(rnp);
359                 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
360                         trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
361                                                          rnp->gpnum,
362                                                          0, rnp->qsmask,
363                                                          rnp->level,
364                                                          rnp->grplo,
365                                                          rnp->grphi,
366                                                          !!rnp->gp_tasks);
367                         rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
368                 } else {
369                         raw_spin_unlock_irqrestore(&rnp->lock, flags);
370                 }
371
372                 /* Unboost if we were boosted. */
373                 if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
374                         rt_mutex_unlock(&rnp->boost_mtx);
375
376                 /*
377                  * If this was the last task on the expedited lists,
378                  * then we need to report up the rcu_node hierarchy.
379                  */
380                 if (!empty_exp && empty_exp_now)
381                         rcu_report_exp_rnp(rcu_state_p, rnp, true);
382         } else {
383                 local_irq_restore(flags);
384         }
385 }
386
387 /*
388  * Dump detailed information for all tasks blocking the current RCU
389  * grace period on the specified rcu_node structure.
390  */
391 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
392 {
393         unsigned long flags;
394         struct task_struct *t;
395
396         raw_spin_lock_irqsave(&rnp->lock, flags);
397         if (!rcu_preempt_blocked_readers_cgp(rnp)) {
398                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
399                 return;
400         }
401         t = list_entry(rnp->gp_tasks->prev,
402                        struct task_struct, rcu_node_entry);
403         list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
404                 sched_show_task(t);
405         raw_spin_unlock_irqrestore(&rnp->lock, flags);
406 }
407
408 /*
409  * Dump detailed information for all tasks blocking the current RCU
410  * grace period.
411  */
412 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
413 {
414         struct rcu_node *rnp = rcu_get_root(rsp);
415
416         rcu_print_detail_task_stall_rnp(rnp);
417         rcu_for_each_leaf_node(rsp, rnp)
418                 rcu_print_detail_task_stall_rnp(rnp);
419 }
420
421 #ifdef CONFIG_RCU_CPU_STALL_INFO
422
423 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
424 {
425         pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
426                rnp->level, rnp->grplo, rnp->grphi);
427 }
428
429 static void rcu_print_task_stall_end(void)
430 {
431         pr_cont("\n");
432 }
433
434 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
435
436 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
437 {
438 }
439
440 static void rcu_print_task_stall_end(void)
441 {
442 }
443
444 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
445
446 /*
447  * Scan the current list of tasks blocked within RCU read-side critical
448  * sections, printing out the tid of each.
449  */
450 static int rcu_print_task_stall(struct rcu_node *rnp)
451 {
452         struct task_struct *t;
453         int ndetected = 0;
454
455         if (!rcu_preempt_blocked_readers_cgp(rnp))
456                 return 0;
457         rcu_print_task_stall_begin(rnp);
458         t = list_entry(rnp->gp_tasks->prev,
459                        struct task_struct, rcu_node_entry);
460         list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
461                 pr_cont(" P%d", t->pid);
462                 ndetected++;
463         }
464         rcu_print_task_stall_end();
465         return ndetected;
466 }
467
468 /*
469  * Check that the list of blocked tasks for the newly completed grace
470  * period is in fact empty.  It is a serious bug to complete a grace
471  * period that still has RCU readers blocked!  This function must be
472  * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
473  * must be held by the caller.
474  *
475  * Also, if there are blocked tasks on the list, they automatically
476  * block the newly created grace period, so set up ->gp_tasks accordingly.
477  */
478 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
479 {
480         WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
481         if (rcu_preempt_has_tasks(rnp))
482                 rnp->gp_tasks = rnp->blkd_tasks.next;
483         WARN_ON_ONCE(rnp->qsmask);
484 }
485
486 /*
487  * Check for a quiescent state from the current CPU.  When a task blocks,
488  * the task is recorded in the corresponding CPU's rcu_node structure,
489  * which is checked elsewhere.
490  *
491  * Caller must disable hard irqs.
492  */
493 static void rcu_preempt_check_callbacks(void)
494 {
495         struct task_struct *t = current;
496
497         if (t->rcu_read_lock_nesting == 0) {
498                 rcu_preempt_qs();
499                 return;
500         }
501         if (t->rcu_read_lock_nesting > 0 &&
502             __this_cpu_read(rcu_data_p->qs_pending) &&
503             !__this_cpu_read(rcu_data_p->passed_quiesce))
504                 t->rcu_read_unlock_special.b.need_qs = true;
505 }
506
507 #ifdef CONFIG_RCU_BOOST
508
509 static void rcu_preempt_do_callbacks(void)
510 {
511         rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
512 }
513
514 #endif /* #ifdef CONFIG_RCU_BOOST */
515
516 /*
517  * Queue a preemptible-RCU callback for invocation after a grace period.
518  */
519 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
520 {
521         __call_rcu(head, func, rcu_state_p, -1, 0);
522 }
523 EXPORT_SYMBOL_GPL(call_rcu);
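/*
 * Usage sketch (editorial illustration, not part of this file): a typical
 * caller embeds an rcu_head in its own structure and passes a callback
 * that frees the structure once a grace period has elapsed.  The names
 * "struct foo", foo_reclaim(), foo_remove(), and the update-side locking
 * are hypothetical.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);	// caller holds update-side lock
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 */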
524
525 /**
526  * synchronize_rcu - wait until a grace period has elapsed.
527  *
528  * Control will return to the caller some time after a full grace
529  * period has elapsed, in other words after all currently executing RCU
530  * read-side critical sections have completed.  Note, however, that
531  * upon return from synchronize_rcu(), the caller might well be executing
532  * concurrently with new RCU read-side critical sections that began while
533  * synchronize_rcu() was waiting.  RCU read-side critical sections are
534  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
535  *
536  * See the description of synchronize_sched() for more detailed information
537  * on memory ordering guarantees.
538  */
539 void synchronize_rcu(void)
540 {
541         rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
542                            !lock_is_held(&rcu_lock_map) &&
543                            !lock_is_held(&rcu_sched_lock_map),
544                            "Illegal synchronize_rcu() in RCU read-side critical section");
545         if (!rcu_scheduler_active)
546                 return;
547         if (rcu_gp_is_expedited())
548                 synchronize_rcu_expedited();
549         else
550                 wait_rcu_gp(call_rcu);
551 }
552 EXPORT_SYMBOL_GPL(synchronize_rcu);
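/*
 * Usage sketch (editorial illustration, not part of this file): the
 * classic update-side pattern unlinks an element, waits for a grace
 * period, and only then frees it, so that readers that entered their
 * rcu_read_lock() sections before the unlink cannot still hold a
 * reference when the memory is reused.  "struct foo", foo_lock, and
 * foo_del() are hypothetical.
 *
 *	static void foo_del(struct foo *fp)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&fp->list);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();	// wait for pre-existing readers
 *		kfree(fp);
 *	}
 */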
553
554 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
555 static unsigned long sync_rcu_preempt_exp_count;
556 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
557
558 /*
559  * Return non-zero if there are any tasks in RCU read-side critical
560  * sections blocking the current preemptible-RCU expedited grace period.
561  * If there is no preemptible-RCU expedited grace period currently in
562  * progress, returns zero unconditionally.
563  */
564 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
565 {
566         return rnp->exp_tasks != NULL;
567 }
568
569 /*
570  * Return non-zero if there is no RCU expedited grace period in progress
571  * for the specified rcu_node structure, in other words, if all CPUs and
572  * tasks covered by the specified rcu_node structure have done their bit
573  * for the current expedited grace period.  Works only for preemptible
574  * RCU -- other RCU implementations use other means.
575  *
576  * Caller must hold sync_rcu_preempt_exp_mutex.
577  */
578 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
579 {
580         return !rcu_preempted_readers_exp(rnp) &&
581                READ_ONCE(rnp->expmask) == 0;
582 }
583
584 /*
585  * Report the exit from RCU read-side critical section for the last task
586  * that queued itself during or before the current expedited preemptible-RCU
587  * grace period.  This event is reported either to the rcu_node structure on
588  * which the task was queued or to one of that rcu_node structure's ancestors,
589  * recursively up the tree.  (Calm down, calm down, we do the recursion
590  * iteratively!)
591  *
592  * Caller must hold sync_rcu_preempt_exp_mutex.
593  */
594 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
595                                bool wake)
596 {
597         unsigned long flags;
598         unsigned long mask;
599
600         raw_spin_lock_irqsave(&rnp->lock, flags);
601         smp_mb__after_unlock_lock();
602         for (;;) {
603                 if (!sync_rcu_preempt_exp_done(rnp)) {
604                         raw_spin_unlock_irqrestore(&rnp->lock, flags);
605                         break;
606                 }
607                 if (rnp->parent == NULL) {
608                         raw_spin_unlock_irqrestore(&rnp->lock, flags);
609                         if (wake) {
610                                 smp_mb(); /* EGP done before wake_up(). */
611                                 wake_up(&sync_rcu_preempt_exp_wq);
612                         }
613                         break;
614                 }
615                 mask = rnp->grpmask;
616                 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
617                 rnp = rnp->parent;
618                 raw_spin_lock(&rnp->lock); /* irqs already disabled */
619                 smp_mb__after_unlock_lock();
620                 rnp->expmask &= ~mask;
621         }
622 }
623
624 /*
625  * Snapshot the tasks blocking the newly started preemptible-RCU expedited
626  * grace period for the specified rcu_node structure, phase 1.  If there
627  * are such tasks, set the ->expmask bits up the rcu_node tree and also
628  * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
629  * that work is needed here.
630  *
631  * Caller must hold sync_rcu_preempt_exp_mutex.
632  */
633 static void
634 sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
635 {
636         unsigned long flags;
637         unsigned long mask;
638         struct rcu_node *rnp_up;
639
640         raw_spin_lock_irqsave(&rnp->lock, flags);
641         smp_mb__after_unlock_lock();
642         WARN_ON_ONCE(rnp->expmask);
643         WARN_ON_ONCE(rnp->exp_tasks);
644         if (!rcu_preempt_has_tasks(rnp)) {
645                 /* No blocked tasks, nothing to do. */
646                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
647                 return;
648         }
649         /* Call for Phase 2 and propagate ->expmask bits up the tree. */
650         rnp->expmask = 1;
651         rnp_up = rnp;
652         while (rnp_up->parent) {
653                 mask = rnp_up->grpmask;
654                 rnp_up = rnp_up->parent;
655                 if (rnp_up->expmask & mask)
656                         break;
657                 raw_spin_lock(&rnp_up->lock); /* irqs already off */
658                 smp_mb__after_unlock_lock();
659                 rnp_up->expmask |= mask;
660                 raw_spin_unlock(&rnp_up->lock); /* irqs still off */
661         }
662         raw_spin_unlock_irqrestore(&rnp->lock, flags);
663 }
664
665 /*
666  * Snapshot the tasks blocking the newly started preemptible-RCU expedited
667  * grace period for the specified rcu_node structure, phase 2.  If the
668  * leaf rcu_node structure has its ->expmask field set, check for tasks.
669  * If there are some, clear ->expmask and set ->exp_tasks accordingly,
670  * then initiate RCU priority boosting.  Otherwise, clear ->expmask and
671  * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
672  * enabling rcu_read_unlock_special() to do the bit-clearing.
673  *
674  * Caller must hold sync_rcu_preempt_exp_mutex.
675  */
676 static void
677 sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
678 {
679         unsigned long flags;
680
681         raw_spin_lock_irqsave(&rnp->lock, flags);
682         smp_mb__after_unlock_lock();
683         if (!rnp->expmask) {
684                 /* Phase 1 didn't do anything, so Phase 2 doesn't either. */
685                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
686                 return;
687         }
688
689         /* Phase 1 is over. */
690         rnp->expmask = 0;
691
692         /*
693          * If there are still blocked tasks, set up ->exp_tasks so that
694          * rcu_read_unlock_special() will wake us and then boost them.
695          */
696         if (rcu_preempt_has_tasks(rnp)) {
697                 rnp->exp_tasks = rnp->blkd_tasks.next;
698                 rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
699                 return;
700         }
701
702         /* No longer any blocked tasks, so undo bit setting. */
703         raw_spin_unlock_irqrestore(&rnp->lock, flags);
704         rcu_report_exp_rnp(rsp, rnp, false);
705 }
706
707 /**
708  * synchronize_rcu_expedited - Brute-force RCU grace period
709  *
710  * Wait for an RCU-preempt grace period, but expedite it.  The basic
711  * idea is to invoke synchronize_sched_expedited() to push all the tasks to
712  * the ->blkd_tasks lists and wait for this list to drain.  This consumes
713  * significant time on all CPUs and is unfriendly to real-time workloads,
714  * so is thus not recommended for any sort of common-case code.
715  * In fact, if you are using synchronize_rcu_expedited() in a loop,
716  * please restructure your code to batch your updates, and then use a
717  * single synchronize_rcu() instead.
718  */
719 void synchronize_rcu_expedited(void)
720 {
721         struct rcu_node *rnp;
722         struct rcu_state *rsp = rcu_state_p;
723         unsigned long snap;
724         int trycount = 0;
725
726         smp_mb(); /* Caller's modifications seen first by other CPUs. */
727         snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
728         smp_mb(); /* Above access cannot bleed into critical section. */
729
730         /*
731          * Block CPU-hotplug operations.  This means that any CPU-hotplug
732          * operation that finds an rcu_node structure with tasks in the
733          * process of being boosted will know that all tasks blocking
734          * this expedited grace period will already be in the process of
735          * being boosted.  This simplifies the process of moving tasks
736          * from leaf to root rcu_node structures.
737          */
738         if (!try_get_online_cpus()) {
739                 /* CPU-hotplug operation in flight, fall back to normal GP. */
740                 wait_rcu_gp(call_rcu);
741                 return;
742         }
743
744         /*
745          * Acquire lock, falling back to synchronize_rcu() if too many
746          * lock-acquisition failures.  Of course, if someone does the
747          * expedited grace period for us, just leave.
748          */
749         while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
750                 if (ULONG_CMP_LT(snap,
751                     READ_ONCE(sync_rcu_preempt_exp_count))) {
752                         put_online_cpus();
753                         goto mb_ret; /* Others did our work for us. */
754                 }
755                 if (trycount++ < 10) {
756                         udelay(trycount * num_online_cpus());
757                 } else {
758                         put_online_cpus();
759                         wait_rcu_gp(call_rcu);
760                         return;
761                 }
762         }
763         if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
764                 put_online_cpus();
765                 goto unlock_mb_ret; /* Others did our work for us. */
766         }
767
768         /* force all RCU readers onto ->blkd_tasks lists. */
769         synchronize_sched_expedited();
770
771         /*
772          * Snapshot current state of ->blkd_tasks lists into ->expmask.
773          * Phase 1 sets bits and phase 2 permits rcu_read_unlock_special()
774          * to start clearing them.  Doing this in one phase leads to
775          * strange races between setting and clearing bits, so just say "no"!
776          */
777         rcu_for_each_leaf_node(rsp, rnp)
778                 sync_rcu_preempt_exp_init1(rsp, rnp);
779         rcu_for_each_leaf_node(rsp, rnp)
780                 sync_rcu_preempt_exp_init2(rsp, rnp);
781
782         put_online_cpus();
783
784         /* Wait for snapshotted ->blkd_tasks lists to drain. */
785         rnp = rcu_get_root(rsp);
786         wait_event(sync_rcu_preempt_exp_wq,
787                    sync_rcu_preempt_exp_done(rnp));
788
789         /* Clean up and exit. */
790         smp_mb(); /* ensure expedited GP seen before counter increment. */
791         WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
792 unlock_mb_ret:
793         mutex_unlock(&sync_rcu_preempt_exp_mutex);
794 mb_ret:
795         smp_mb(); /* ensure subsequent action seen after grace period. */
796 }
797 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
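/*
 * Usage note (editorial illustration, not part of this file): per the
 * comment above, a loop that waits for an expedited grace period per
 * element should instead batch the removals and wait only once.  The
 * names foo_lock, foo_list, foo_is_stale(), and the ->pending list_head
 * in "struct foo" are hypothetical.
 *
 *	struct foo *fp, *tmp;
 *	LIST_HEAD(batch);
 *
 *	spin_lock(&foo_lock);
 *	list_for_each_entry_safe(fp, tmp, &foo_list, list)
 *		if (foo_is_stale(fp)) {
 *			list_del_rcu(&fp->list);
 *			list_add(&fp->pending, &batch);	// separate list_head:
 *							// readers may still be
 *							// walking ->list
 *		}
 *	spin_unlock(&foo_lock);
 *
 *	synchronize_rcu();	// one grace period covers the whole batch
 *	list_for_each_entry_safe(fp, tmp, &batch, pending)
 *		kfree(fp);
 */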
798
799 /**
800  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
801  *
802  * Note that this primitive does not necessarily wait for an RCU grace period
803  * to complete.  For example, if there are no RCU callbacks queued anywhere
804  * in the system, then rcu_barrier() is within its rights to return
805  * immediately, without waiting for anything, much less an RCU grace period.
806  */
807 void rcu_barrier(void)
808 {
809         _rcu_barrier(rcu_state_p);
810 }
811 EXPORT_SYMBOL_GPL(rcu_barrier);
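/*
 * Usage sketch (editorial illustration, not part of this file): the
 * classic user of rcu_barrier() is module-exit code that has posted
 * call_rcu() callbacks referencing module memory or code, and that must
 * therefore wait for all of them to be invoked before unloading.  The
 * names foo_exit() and foo_cleanup() are hypothetical.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_cleanup();		// may post call_rcu() callbacks
 *		rcu_barrier();		// wait for them all to be invoked
 *		// now safe to free module-global state and unload
 *	}
 *	module_exit(foo_exit);
 */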
812
813 /*
814  * Initialize preemptible RCU's state structures.
815  */
816 static void __init __rcu_init_preempt(void)
817 {
818         rcu_init_one(rcu_state_p, rcu_data_p);
819 }
820
821 /*
822  * Check for a task exiting while in a preemptible-RCU read-side
823  * critical section, clean up if so.  No need to issue warnings,
824  * as debug_check_no_locks_held() already does this if lockdep
825  * is enabled.
826  */
827 void exit_rcu(void)
828 {
829         struct task_struct *t = current;
830
831         if (likely(list_empty(&current->rcu_node_entry)))
832                 return;
833         t->rcu_read_lock_nesting = 1;
834         barrier();
835         t->rcu_read_unlock_special.b.blocked = true;
836         __rcu_read_unlock();
837 }
838
839 #else /* #ifdef CONFIG_PREEMPT_RCU */
840
841 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
842 static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;
843
844 /*
845  * Tell them what RCU they are running.
846  */
847 static void __init rcu_bootup_announce(void)
848 {
849         pr_info("Hierarchical RCU implementation.\n");
850         rcu_bootup_announce_oddness();
851 }
852
853 /*
854  * Because preemptible RCU does not exist, we never have to check for
855  * CPUs being in quiescent states.
856  */
857 static void rcu_preempt_note_context_switch(void)
858 {
859 }
860
861 /*
862  * Because preemptible RCU does not exist, there are never any preempted
863  * RCU readers.
864  */
865 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
866 {
867         return 0;
868 }
869
870 /*
871  * Because there is no preemptible RCU, there can be no readers blocked.
872  */
873 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
874 {
875         return false;
876 }
877
878 /*
879  * Because preemptible RCU does not exist, we never have to check for
880  * tasks blocked within RCU read-side critical sections.
881  */
882 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
883 {
884 }
885
886 /*
887  * Because preemptible RCU does not exist, we never have to check for
888  * tasks blocked within RCU read-side critical sections.
889  */
890 static int rcu_print_task_stall(struct rcu_node *rnp)
891 {
892         return 0;
893 }
894
895 /*
896  * Because there is no preemptible RCU, there can be no readers blocked,
897  * so there is no need to check for blocked tasks.  So check only for
898  * bogus qsmask values.
899  */
900 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
901 {
902         WARN_ON_ONCE(rnp->qsmask);
903 }
904
905 /*
906  * Because preemptible RCU does not exist, it never has any callbacks
907  * to check.
908  */
909 static void rcu_preempt_check_callbacks(void)
910 {
911 }
912
913 /*
914  * Wait for an rcu-preempt grace period, but make it happen quickly.
915  * But because preemptible RCU does not exist, map to rcu-sched.
916  */
917 void synchronize_rcu_expedited(void)
918 {
919         synchronize_sched_expedited();
920 }
921 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
922
923 /*
924  * Because preemptible RCU does not exist, rcu_barrier() is just
925  * another name for rcu_barrier_sched().
926  */
927 void rcu_barrier(void)
928 {
929         rcu_barrier_sched();
930 }
931 EXPORT_SYMBOL_GPL(rcu_barrier);
932
933 /*
934  * Because preemptible RCU does not exist, it need not be initialized.
935  */
936 static void __init __rcu_init_preempt(void)
937 {
938 }
939
940 /*
941  * Because preemptible RCU does not exist, tasks cannot possibly exit
942  * while in preemptible RCU read-side critical sections.
943  */
944 void exit_rcu(void)
945 {
946 }
947
948 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
949
950 #ifdef CONFIG_RCU_BOOST
951
952 #include "../locking/rtmutex_common.h"
953
954 #ifdef CONFIG_RCU_TRACE
955
956 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
957 {
958         if (!rcu_preempt_has_tasks(rnp))
959                 rnp->n_balk_blkd_tasks++;
960         else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
961                 rnp->n_balk_exp_gp_tasks++;
962         else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
963                 rnp->n_balk_boost_tasks++;
964         else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
965                 rnp->n_balk_notblocked++;
966         else if (rnp->gp_tasks != NULL &&
967                  ULONG_CMP_LT(jiffies, rnp->boost_time))
968                 rnp->n_balk_notyet++;
969         else
970                 rnp->n_balk_nos++;
971 }
972
973 #else /* #ifdef CONFIG_RCU_TRACE */
974
975 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
976 {
977 }
978
979 #endif /* #else #ifdef CONFIG_RCU_TRACE */
980
981 static void rcu_wake_cond(struct task_struct *t, int status)
982 {
983         /*
984          * If the thread is yielding, only wake it when this
985          * is invoked from idle.
986          */
987         if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
988                 wake_up_process(t);
989 }
990
991 /*
992  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
993  * or ->boost_tasks, advancing the pointer to the next task in the
994  * ->blkd_tasks list.
995  *
996  * Note that irqs must be enabled: boosting the task can block.
997  * Returns 1 if there are more tasks needing to be boosted.
998  */
999 static int rcu_boost(struct rcu_node *rnp)
1000 {
1001         unsigned long flags;
1002         struct task_struct *t;
1003         struct list_head *tb;
1004
1005         if (READ_ONCE(rnp->exp_tasks) == NULL &&
1006             READ_ONCE(rnp->boost_tasks) == NULL)
1007                 return 0;  /* Nothing left to boost. */
1008
1009         raw_spin_lock_irqsave(&rnp->lock, flags);
1010         smp_mb__after_unlock_lock();
1011
1012         /*
1013          * Recheck under the lock: all tasks in need of boosting
1014          * might exit their RCU read-side critical sections on their own.
1015          */
1016         if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1017                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1018                 return 0;
1019         }
1020
1021         /*
1022          * Preferentially boost tasks blocking expedited grace periods.
1023          * This cannot starve the normal grace periods because a second
1024          * expedited grace period must boost all blocked tasks, including
1025          * those blocking the pre-existing normal grace period.
1026          */
1027         if (rnp->exp_tasks != NULL) {
1028                 tb = rnp->exp_tasks;
1029                 rnp->n_exp_boosts++;
1030         } else {
1031                 tb = rnp->boost_tasks;
1032                 rnp->n_normal_boosts++;
1033         }
1034         rnp->n_tasks_boosted++;
1035
1036         /*
1037          * We boost task t by manufacturing an rt_mutex that appears to
1038          * be held by task t.  We leave a pointer to that rt_mutex where
1039          * task t can find it, and task t will release the mutex when it
1040          * exits its outermost RCU read-side critical section.  Then
1041          * simply acquiring this artificial rt_mutex will boost task
1042          * t's priority.  (Thanks to tglx for suggesting this approach!)
1043          *
1044          * Note that task t must acquire rnp->lock to remove itself from
1045          * the ->blkd_tasks list, which it will do from exit() if from
1046          * nowhere else.  We therefore are guaranteed that task t will
1047          * stay around at least until we drop rnp->lock.  Note that
1048          * rnp->lock also resolves races between our priority boosting
1049          * and task t's exiting its outermost RCU read-side critical
1050          * section.
1051          */
1052         t = container_of(tb, struct task_struct, rcu_node_entry);
1053         rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1054         raw_spin_unlock_irqrestore(&rnp->lock, flags);
1055         /* Lock only for side effect: boosts task t's priority. */
1056         rt_mutex_lock(&rnp->boost_mtx);
1057         rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
1058
1059         return READ_ONCE(rnp->exp_tasks) != NULL ||
1060                READ_ONCE(rnp->boost_tasks) != NULL;
1061 }
1062
1063 /*
1064  * Priority-boosting kthread.  One per leaf rcu_node and one for the
1065  * root rcu_node.
1066  */
1067 static int rcu_boost_kthread(void *arg)
1068 {
1069         struct rcu_node *rnp = (struct rcu_node *)arg;
1070         int spincnt = 0;
1071         int more2boost;
1072
1073         trace_rcu_utilization(TPS("Start boost kthread@init"));
1074         for (;;) {
1075                 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1076                 trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1077                 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1078                 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1079                 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1080                 more2boost = rcu_boost(rnp);
1081                 if (more2boost)
1082                         spincnt++;
1083                 else
1084                         spincnt = 0;
1085                 if (spincnt > 10) {
1086                         rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1087                         trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1088                         schedule_timeout_interruptible(2);
1089                         trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1090                         spincnt = 0;
1091                 }
1092         }
1093         /* NOTREACHED */
1094         trace_rcu_utilization(TPS("End boost kthread@notreached"));
1095         return 0;
1096 }
1097
1098 /*
1099  * Check to see if it is time to start boosting RCU readers that are
1100  * blocking the current grace period, and, if so, tell the per-rcu_node
1101  * kthread to start boosting them.  If there is an expedited grace
1102  * period in progress, it is always time to boost.
1103  *
1104  * The caller must hold rnp->lock, which this function releases.
1105  * The ->boost_kthread_task is immortal, so we don't need to worry
1106  * about it going away.
1107  */
1108 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1109         __releases(rnp->lock)
1110 {
1111         struct task_struct *t;
1112
1113         if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1114                 rnp->n_balk_exp_gp_tasks++;
1115                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1116                 return;
1117         }
1118         if (rnp->exp_tasks != NULL ||
1119             (rnp->gp_tasks != NULL &&
1120              rnp->boost_tasks == NULL &&
1121              rnp->qsmask == 0 &&
1122              ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1123                 if (rnp->exp_tasks == NULL)
1124                         rnp->boost_tasks = rnp->gp_tasks;
1125                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1126                 t = rnp->boost_kthread_task;
1127                 if (t)
1128                         rcu_wake_cond(t, rnp->boost_kthread_status);
1129         } else {
1130                 rcu_initiate_boost_trace(rnp);
1131                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1132         }
1133 }
1134
1135 /*
1136  * Wake up the per-CPU kthread to invoke RCU callbacks.
1137  */
1138 static void invoke_rcu_callbacks_kthread(void)
1139 {
1140         unsigned long flags;
1141
1142         local_irq_save(flags);
1143         __this_cpu_write(rcu_cpu_has_work, 1);
1144         if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1145             current != __this_cpu_read(rcu_cpu_kthread_task)) {
1146                 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1147                               __this_cpu_read(rcu_cpu_kthread_status));
1148         }
1149         local_irq_restore(flags);
1150 }
1151
1152 /*
1153  * Is the current CPU running the RCU-callbacks kthread?
1154  * Caller must have preemption disabled.
1155  */
1156 static bool rcu_is_callbacks_kthread(void)
1157 {
1158         return __this_cpu_read(rcu_cpu_kthread_task) == current;
1159 }
1160
1161 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1162
1163 /*
1164  * Do priority-boost accounting for the start of a new grace period.
1165  */
1166 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1167 {
1168         rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1169 }
1170
1171 /*
1172  * Create an RCU-boost kthread for the specified node if one does not
1173  * already exist.  We only create this kthread for preemptible RCU.
1174  * Returns zero if all is well, a negated errno otherwise.
1175  */
1176 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1177                                        struct rcu_node *rnp)
1178 {
1179         int rnp_index = rnp - &rsp->node[0];
1180         unsigned long flags;
1181         struct sched_param sp;
1182         struct task_struct *t;
1183
1184         if (rcu_state_p != rsp)
1185                 return 0;
1186
1187         if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
1188                 return 0;
1189
1190         rsp->boost = 1;
1191         if (rnp->boost_kthread_task != NULL)
1192                 return 0;
1193         t = kthread_create(rcu_boost_kthread, (void *)rnp,
1194                            "rcub/%d", rnp_index);
1195         if (IS_ERR(t))
1196                 return PTR_ERR(t);
1197         raw_spin_lock_irqsave(&rnp->lock, flags);
1198         smp_mb__after_unlock_lock();
1199         rnp->boost_kthread_task = t;
1200         raw_spin_unlock_irqrestore(&rnp->lock, flags);
1201         sp.sched_priority = kthread_prio;
1202         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1203         wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1204         return 0;
1205 }
1206
1207 static void rcu_kthread_do_work(void)
1208 {
1209         rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
1210         rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
1211         rcu_preempt_do_callbacks();
1212 }
1213
1214 static void rcu_cpu_kthread_setup(unsigned int cpu)
1215 {
1216         struct sched_param sp;
1217
1218         sp.sched_priority = kthread_prio;
1219         sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1220 }
1221
1222 static void rcu_cpu_kthread_park(unsigned int cpu)
1223 {
1224         per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1225 }
1226
1227 static int rcu_cpu_kthread_should_run(unsigned int cpu)
1228 {
1229         return __this_cpu_read(rcu_cpu_has_work);
1230 }
1231
1232 /*
1233  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1234  * RCU softirq used in flavors and configurations of RCU that do not
1235  * support RCU priority boosting.
1236  */
1237 static void rcu_cpu_kthread(unsigned int cpu)
1238 {
1239         unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
1240         char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
1241         int spincnt;
1242
1243         for (spincnt = 0; spincnt < 10; spincnt++) {
1244                 trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1245                 local_bh_disable();
1246                 *statusp = RCU_KTHREAD_RUNNING;
1247                 this_cpu_inc(rcu_cpu_kthread_loops);
1248                 local_irq_disable();
1249                 work = *workp;
1250                 *workp = 0;
1251                 local_irq_enable();
1252                 if (work)
1253                         rcu_kthread_do_work();
1254                 local_bh_enable();
1255                 if (*workp == 0) {
1256                         trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
1257                         *statusp = RCU_KTHREAD_WAITING;
1258                         return;
1259                 }
1260         }
1261         *statusp = RCU_KTHREAD_YIELDING;
1262         trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
1263         schedule_timeout_interruptible(2);
1264         trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
1265         *statusp = RCU_KTHREAD_WAITING;
1266 }
1267
1268 /*
1269  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1270  * served by the rcu_node in question.  The CPU hotplug lock is still
1271  * held, so the value of rnp->qsmaskinit will be stable.
1272  *
1273  * We don't include outgoingcpu in the affinity set, use -1 if there is
1274  * no outgoing CPU.  If there are no CPUs left in the affinity set,
1275  * this function allows the kthread to execute on any CPU.
1276  */
1277 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1278 {
1279         struct task_struct *t = rnp->boost_kthread_task;
1280         unsigned long mask = rcu_rnp_online_cpus(rnp);
1281         cpumask_var_t cm;
1282         int cpu;
1283
1284         if (!t)
1285                 return;
1286         if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1287                 return;
1288         for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1289                 if ((mask & 0x1) && cpu != outgoingcpu)
1290                         cpumask_set_cpu(cpu, cm);
1291         if (cpumask_weight(cm) == 0)
1292                 cpumask_setall(cm);
1293         set_cpus_allowed_ptr(t, cm);
1294         free_cpumask_var(cm);
1295 }
1296
1297 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1298         .store                  = &rcu_cpu_kthread_task,
1299         .thread_should_run      = rcu_cpu_kthread_should_run,
1300         .thread_fn              = rcu_cpu_kthread,
1301         .thread_comm            = "rcuc/%u",
1302         .setup                  = rcu_cpu_kthread_setup,
1303         .park                   = rcu_cpu_kthread_park,
1304 };
1305
1306 /*
1307  * Spawn boost kthreads -- called as soon as the scheduler is running.
1308  */
1309 static void __init rcu_spawn_boost_kthreads(void)
1310 {
1311         struct rcu_node *rnp;
1312         int cpu;
1313
1314         for_each_possible_cpu(cpu)
1315                 per_cpu(rcu_cpu_has_work, cpu) = 0;
1316         BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1317         rcu_for_each_leaf_node(rcu_state_p, rnp)
1318                 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1319 }
1320
1321 static void rcu_prepare_kthreads(int cpu)
1322 {
1323         struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
1324         struct rcu_node *rnp = rdp->mynode;
1325
1326         /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1327         if (rcu_scheduler_fully_active)
1328                 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1329 }
1330
1331 #else /* #ifdef CONFIG_RCU_BOOST */
1332
1333 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1334         __releases(rnp->lock)
1335 {
1336         raw_spin_unlock_irqrestore(&rnp->lock, flags);
1337 }
1338
1339 static void invoke_rcu_callbacks_kthread(void)
1340 {
1341         WARN_ON_ONCE(1);
1342 }
1343
1344 static bool rcu_is_callbacks_kthread(void)
1345 {
1346         return false;
1347 }
1348
1349 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1350 {
1351 }
1352
1353 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1354 {
1355 }
1356
1357 static void __init rcu_spawn_boost_kthreads(void)
1358 {
1359 }
1360
1361 static void rcu_prepare_kthreads(int cpu)
1362 {
1363 }
1364
1365 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1366
1367 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1368
1369 /*
1370  * Check to see if any future RCU-related work will need to be done
1371  * by the current CPU, even if none need be done immediately, returning
1372  * 1 if so.  This function is part of the RCU implementation; it is -not-
1373  * an exported member of the RCU API.
1374  *
1375  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1376  * any flavor of RCU.
1377  */
1378 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1379 {
1380         *nextevt = KTIME_MAX;
1381         return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
1382                ? 0 : rcu_cpu_has_callbacks(NULL);
1383 }
1384
1385 /*
1386  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1387  * after it.
1388  */
1389 static void rcu_cleanup_after_idle(void)
1390 {
1391 }
1392
1393 /*
1394  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1395  * is nothing.
1396  */
1397 static void rcu_prepare_for_idle(void)
1398 {
1399 }
1400
1401 /*
1402  * Don't bother keeping a running count of the number of RCU callbacks
1403  * posted because CONFIG_RCU_FAST_NO_HZ=n.
1404  */
1405 static void rcu_idle_count_callbacks_posted(void)
1406 {
1407 }
1408
1409 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1410
1411 /*
1412  * This code is invoked when a CPU goes idle, at which point we want
1413  * to have the CPU do everything required for RCU so that it can enter
1414  * the energy-efficient dyntick-idle mode.  This is handled by a
1415  * state machine implemented by rcu_prepare_for_idle() below.
1416  *
1417  * The following two preprocessor symbols control this state machine:
1418  *
1419  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1420  *      to sleep in dyntick-idle mode with RCU callbacks pending.  This
1421  *      is sized to be roughly one RCU grace period.  Those energy-efficiency
1422  *      benchmarkers who might otherwise be tempted to set this to a large
1423  *      number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1424  *      system.  And if you are -that- concerned about energy efficiency,
1425  *      just power the system down and be done with it!
1426  * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1427  *      permitted to sleep in dyntick-idle mode with only lazy RCU
1428  *      callbacks pending.  Setting this too high can OOM your system.
1429  *
1430  * The values below work well in practice.  If future workloads require
1431  * adjustment, they can be converted into kernel config parameters, though
1432  * making the state machine smarter might be a better option.
1433  */
1434 #define RCU_IDLE_GP_DELAY 4             /* Roughly one grace period. */
1435 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
1436
1437 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1438 module_param(rcu_idle_gp_delay, int, 0644);
1439 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1440 module_param(rcu_idle_lazy_gp_delay, int, 0644);
1441
1442 /*
1443  * Try to advance callbacks for all flavors of RCU on the current CPU, but
1444  * only if it has been a while since the last time we did so.  Afterwards,
1445  * if there are any callbacks ready for immediate invocation, return true.
1446  */
1447 static bool __maybe_unused rcu_try_advance_all_cbs(void)
1448 {
1449         bool cbs_ready = false;
1450         struct rcu_data *rdp;
1451         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1452         struct rcu_node *rnp;
1453         struct rcu_state *rsp;
1454
1455         /* Exit early if we advanced recently. */
1456         if (jiffies == rdtp->last_advance_all)
1457                 return false;
1458         rdtp->last_advance_all = jiffies;
1459
1460         for_each_rcu_flavor(rsp) {
1461                 rdp = this_cpu_ptr(rsp->rda);
1462                 rnp = rdp->mynode;
1463
1464                 /*
1465                  * Don't bother checking unless a grace period has
1466                  * completed since we last checked and there are
1467                  * callbacks not yet ready to invoke.
1468                  */
1469                 if ((rdp->completed != rnp->completed ||
1470                      unlikely(READ_ONCE(rdp->gpwrap))) &&
1471                     rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1472                         note_gp_changes(rsp, rdp);
1473
1474                 if (cpu_has_callbacks_ready_to_invoke(rdp))
1475                         cbs_ready = true;
1476         }
1477         return cbs_ready;
1478 }
1479
1480 /*
1481  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1482  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1483  * caller to set the timeout based on whether or not there are non-lazy
1484  * callbacks.
1485  *
1486  * The caller must have disabled interrupts.
1487  */
1488 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1489 {
1490         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1491         unsigned long dj;
1492
1493         if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) {
1494                 *nextevt = KTIME_MAX;
1495                 return 0;
1496         }
1497
1498         /* Snapshot to detect later posting of non-lazy callback. */
1499         rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1500
1501         /* If no callbacks, RCU doesn't need the CPU. */
1502         if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
1503                 *nextevt = KTIME_MAX;
1504                 return 0;
1505         }
1506
1507         /* Attempt to advance callbacks. */
1508         if (rcu_try_advance_all_cbs()) {
1509                 /* Some ready to invoke, so initiate later invocation. */
1510                 invoke_rcu_core();
1511                 return 1;
1512         }
1513         rdtp->last_accelerate = jiffies;
1514
1515         /* Request timer delay depending on laziness, and round. */
1516         if (!rdtp->all_lazy) {
1517                 dj = round_up(rcu_idle_gp_delay + jiffies,
1518                                rcu_idle_gp_delay) - jiffies;
1519         } else {
1520                 dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1521         }
1522         *nextevt = basemono + dj * TICK_NSEC;
1523         return 0;
1524 }
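
/*
 * Editor's illustrative sketch (not part of this file): a standalone,
 * userspace-style model of the timer-delay computation in rcu_needs_cpu()
 * above, assuming HZ = 1000 and the default RCU_IDLE_GP_DELAY and
 * RCU_IDLE_LAZY_GP_DELAY values.  The real code uses the kernel's
 * round_up() and round_jiffies(); here the rounding is open-coded, and
 * rounding up to a whole second is a crude stand-in for round_jiffies().
 */
#if 0
#include <stdio.h>

#define HZ 1000UL
#define TICK_NSEC (1000000000UL / HZ)
#define RCU_IDLE_GP_DELAY 4UL			/* Jiffies, roughly one GP. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)		/* Jiffies, roughly six seconds. */

/* Smallest multiple of y that is >= x (y need not be a power of two here). */
static unsigned long round_up_ul(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned long jiffies = 100003;		/* Arbitrary current time, in ticks. */
	unsigned long basemono = 0;		/* Arbitrary base, in nanoseconds. */
	unsigned long dj, nextevt;

	/* Non-lazy callbacks pending: sleep roughly one grace period. */
	dj = round_up_ul(jiffies + RCU_IDLE_GP_DELAY, RCU_IDLE_GP_DELAY) - jiffies;
	nextevt = basemono + dj * TICK_NSEC;
	printf("non-lazy: sleep %lu jiffies, next event at %lu ns\n", dj, nextevt);

	/* Only lazy callbacks: a much longer, coarsely rounded delay. */
	dj = round_up_ul(jiffies + RCU_IDLE_LAZY_GP_DELAY, HZ) - jiffies;
	printf("lazy: sleep %lu jiffies\n", dj);
	return 0;
}
#endif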
1525
1526 /*
1527  * Prepare a CPU for idle from an RCU perspective.  The first major task
1528  * is to sense whether nohz mode has been enabled or disabled via sysfs.
1529  * The second major task is to check to see if a non-lazy callback has
1530  * arrived at a CPU that previously had only lazy callbacks.  The third
1531  * major task is to accelerate (that is, assign grace-period numbers to)
1532  * any recently arrived callbacks.
1533  *
1534  * The caller must have disabled interrupts.
1535  */
1536 static void rcu_prepare_for_idle(void)
1537 {
1538         bool needwake;
1539         struct rcu_data *rdp;
1540         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1541         struct rcu_node *rnp;
1542         struct rcu_state *rsp;
1543         int tne;
1544
1545         if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL))
1546                 return;
1547
1548         /* Handle nohz enablement switches conservatively. */
1549         tne = READ_ONCE(tick_nohz_active);
1550         if (tne != rdtp->tick_nohz_enabled_snap) {
1551                 if (rcu_cpu_has_callbacks(NULL))
1552                         invoke_rcu_core(); /* force nohz to see update. */
1553                 rdtp->tick_nohz_enabled_snap = tne;
1554                 return;
1555         }
1556         if (!tne)
1557                 return;
1558
1559         /* If this is a no-CBs CPU, no callbacks, just return. */
1560         if (rcu_is_nocb_cpu(smp_processor_id()))
1561                 return;
1562
1563         /*
1564          * If a non-lazy callback arrived at a CPU having only lazy
1565          * callbacks, invoke RCU core for the side-effect of recalculating
1566          * idle duration on re-entry to idle.
1567          */
1568         if (rdtp->all_lazy &&
1569             rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1570                 rdtp->all_lazy = false;
1571                 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1572                 invoke_rcu_core();
1573                 return;
1574         }
1575
1576         /*
1577          * If we have not yet accelerated this jiffy, accelerate all
1578          * callbacks on this CPU.
1579          */
1580         if (rdtp->last_accelerate == jiffies)
1581                 return;
1582         rdtp->last_accelerate = jiffies;
1583         for_each_rcu_flavor(rsp) {
1584                 rdp = this_cpu_ptr(rsp->rda);
1585                 if (!*rdp->nxttail[RCU_DONE_TAIL])
1586                         continue;
1587                 rnp = rdp->mynode;
1588                 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1589                 smp_mb__after_unlock_lock();
1590                 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
1591                 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1592                 if (needwake)
1593                         rcu_gp_kthread_wake(rsp);
1594         }
1595 }
1596
1597 /*
1598  * Clean up for exit from idle.  Attempt to advance callbacks based on
1599  * any grace periods that elapsed while the CPU was idle, and if any
1600  * callbacks are now ready to invoke, initiate invocation.
1601  */
1602 static void rcu_cleanup_after_idle(void)
1603 {
1604         if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
1605             rcu_is_nocb_cpu(smp_processor_id()))
1606                 return;
1607         if (rcu_try_advance_all_cbs())
1608                 invoke_rcu_core();
1609 }
1610
1611 /*
1612  * Keep a running count of the number of non-lazy callbacks posted
1613  * on this CPU.  This running counter (which is never decremented) allows
1614  * rcu_prepare_for_idle() to detect when something out of the idle loop
1615  * posts a callback, even if an equal number of callbacks are invoked.
1616  * Of course, callbacks should only be posted from within a trace event
1617  * designed to be called from idle or from within RCU_NONIDLE().
1618  */
1619 static void rcu_idle_count_callbacks_posted(void)
1620 {
1621         __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1622 }
1623
1624 /*
1625  * Data for flushing lazy RCU callbacks at OOM time.
1626  */
1627 static atomic_t oom_callback_count;
1628 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1629
1630 /*
1631  * RCU OOM callback -- decrement the outstanding count and deliver the
1632  * wake-up if we are the last one.
1633  */
1634 static void rcu_oom_callback(struct rcu_head *rhp)
1635 {
1636         if (atomic_dec_and_test(&oom_callback_count))
1637                 wake_up(&oom_callback_wq);
1638 }
1639
1640 /*
1641  * Post an rcu_oom_notify callback on the current CPU if it has at
1642  * least one lazy callback.  This will unnecessarily post callbacks
1643  * to CPUs that already have a non-lazy callback at the end of their
1644  * callback list, but this is an infrequent operation, so accept some
1645  * extra overhead to keep things simple.
1646  */
1647 static void rcu_oom_notify_cpu(void *unused)
1648 {
1649         struct rcu_state *rsp;
1650         struct rcu_data *rdp;
1651
1652         for_each_rcu_flavor(rsp) {
1653                 rdp = raw_cpu_ptr(rsp->rda);
1654                 if (rdp->qlen_lazy != 0) {
1655                         atomic_inc(&oom_callback_count);
1656                         rsp->call(&rdp->oom_head, rcu_oom_callback);
1657                 }
1658         }
1659 }
1660
1661 /*
1662  * If low on memory, ensure that each CPU has a non-lazy callback.
1663  * This will wake up CPUs that have only lazy callbacks, in turn
1664  * ensuring that they free up the corresponding memory in a timely manner.
1665  * Because an uncertain amount of memory will be freed in some uncertain
1666  * timeframe, we do not claim to have freed anything.
1667  */
1668 static int rcu_oom_notify(struct notifier_block *self,
1669                           unsigned long notused, void *nfreed)
1670 {
1671         int cpu;
1672
1673         /* Wait for callbacks from earlier instance to complete. */
1674         wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1675         smp_mb(); /* Ensure callback reuse happens after callback invocation. */
1676
1677         /*
1678          * Prevent premature wakeup: ensure that all increments happen
1679          * before there is a chance of the counter reaching zero.
1680          */
1681         atomic_set(&oom_callback_count, 1);
1682
1683         get_online_cpus();
1684         for_each_online_cpu(cpu) {
1685                 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1686                 cond_resched_rcu_qs();
1687         }
1688         put_online_cpus();
1689
1690         /* Unconditionally decrement: no need to wake ourselves up. */
1691         atomic_dec(&oom_callback_count);
1692
1693         return NOTIFY_OK;
1694 }
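
/*
 * Editor's illustrative sketch (not part of this file): the counter
 * protocol used by rcu_oom_notify() above, in isolation.  Starting the
 * count at one ("bias") prevents a wakeup from firing while callbacks
 * are still being posted; the bias is dropped only after every CPU has
 * been visited, and a later waiter re-checks the count == 0 condition.
 * This is a userspace model with a plain counter standing in for
 * oom_callback_count and a printf() standing in for the wait-queue wakeup.
 */
#if 0
#include <stdio.h>

static int oom_callback_count;

static void oom_callback_model(void)
{
	if (--oom_callback_count == 0)
		printf("wake_up(&oom_callback_wq)\n");
}

int main(void)
{
	int cpu, ncpus = 4;

	oom_callback_count = 1;			/* Bias: no premature wakeup. */
	for (cpu = 0; cpu < ncpus; cpu++) {
		oom_callback_count++;		/* One callback posted per CPU... */
		oom_callback_model();		/* ...which may complete at any time. */
	}
	oom_callback_count--;	/* Drop the bias; a later waiter re-checks count == 0. */
	printf("final count = %d\n", oom_callback_count);
	return 0;
}
#endif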
1695
1696 static struct notifier_block rcu_oom_nb = {
1697         .notifier_call = rcu_oom_notify
1698 };
1699
1700 static int __init rcu_register_oom_notifier(void)
1701 {
1702         register_oom_notifier(&rcu_oom_nb);
1703         return 0;
1704 }
1705 early_initcall(rcu_register_oom_notifier);
1706
1707 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1708
1709 #ifdef CONFIG_RCU_CPU_STALL_INFO
1710
1711 #ifdef CONFIG_RCU_FAST_NO_HZ
1712
1713 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1714 {
1715         struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1716         unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1717
1718         sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1719                 rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1720                 ulong2long(nlpd),
1721                 rdtp->all_lazy ? 'L' : '.',
1722                 rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1723 }
1724
1725 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1726
1727 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1728 {
1729         *cp = '\0';
1730 }
1731
1732 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1733
1734 /* Initiate the stall-info list. */
1735 static void print_cpu_stall_info_begin(void)
1736 {
1737         pr_cont("\n");
1738 }
1739
1740 /*
1741  * Print out diagnostic information for the specified stalled CPU.
1742  *
1743  * If the specified CPU is aware of the current RCU grace period
1744  * (flavor specified by rsp), then print the number of scheduling
1745  * clock interrupts the CPU has taken during the time that it has
1746  * been aware.  Otherwise, print the number of RCU grace periods
1747  * that this CPU is ignorant of, for example, "1" if the CPU was
1748  * aware of the previous grace period.
1749  *
1750  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1751  */
1752 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1753 {
1754         char fast_no_hz[72];
1755         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1756         struct rcu_dynticks *rdtp = rdp->dynticks;
1757         char *ticks_title;
1758         unsigned long ticks_value;
1759
1760         if (rsp->gpnum == rdp->gpnum) {
1761                 ticks_title = "ticks this GP";
1762                 ticks_value = rdp->ticks_this_gp;
1763         } else {
1764                 ticks_title = "GPs behind";
1765                 ticks_value = rsp->gpnum - rdp->gpnum;
1766         }
1767         print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1768         pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
1769                cpu, ticks_value, ticks_title,
1770                atomic_read(&rdtp->dynticks) & 0xfff,
1771                rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1772                rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1773                READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1774                fast_no_hz);
1775 }
1776
1777 /* Terminate the stall-info list. */
1778 static void print_cpu_stall_info_end(void)
1779 {
1780         pr_err("\t");
1781 }
1782
1783 /* Zero ->ticks_this_gp for all flavors of RCU. */
1784 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1785 {
1786         rdp->ticks_this_gp = 0;
1787         rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1788 }
1789
1790 /* Increment ->ticks_this_gp for all flavors of RCU. */
1791 static void increment_cpu_stall_ticks(void)
1792 {
1793         struct rcu_state *rsp;
1794
1795         for_each_rcu_flavor(rsp)
1796                 raw_cpu_inc(rsp->rda->ticks_this_gp);
1797 }
1798
1799 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
1800
1801 static void print_cpu_stall_info_begin(void)
1802 {
1803         pr_cont(" {");
1804 }
1805
1806 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1807 {
1808         pr_cont(" %d", cpu);
1809 }
1810
1811 static void print_cpu_stall_info_end(void)
1812 {
1813         pr_cont("} ");
1814 }
1815
1816 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1817 {
1818 }
1819
1820 static void increment_cpu_stall_ticks(void)
1821 {
1822 }
1823
1824 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
1825
1826 #ifdef CONFIG_RCU_NOCB_CPU
1827
1828 /*
1829  * Offload callback processing from the boot-time-specified set of CPUs
1830  * specified by rcu_nocb_mask.  For each CPU in the set, there is a
1831  * kthread created that pulls the callbacks from the corresponding CPU,
1832  * waits for a grace period to elapse, and invokes the callbacks.
1833  * The no-CBs CPUs do a wake_up() on their kthread when they insert
1834  * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1835  * has been specified, in which case each kthread actively polls its
1836  * CPU.  (Which isn't so great for energy efficiency, but which does
1837  * reduce RCU's overhead on that CPU.)
1838  *
1839  * This is intended to be used in conjunction with Frederic Weisbecker's
1840  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1841  * running CPU-bound user-mode computations.
1842  *
1843  * Offloading of callback processing could also in theory be used as
1844  * an energy-efficiency measure because CPUs with no RCU callbacks
1845  * queued are more aggressive about entering dyntick-idle mode.
1846  */
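
/*
 * Editor's usage note (not part of this file): with this support built in,
 * callback offloading is selected from the kernel command line, for example:
 *
 *	rcu_nocbs=1-7		offload callbacks from CPUs 1-7
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *				as above, but have the rcuo kthreads poll
 *				instead of being woken by the no-CBs CPUs
 *
 * The "rcu_nocbs=" list is parsed by rcu_nocb_setup() below using the usual
 * cpulist format accepted by cpulist_parse(), and "rcu_nocb_poll" is handled
 * by parse_rcu_nocb_poll().
 */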
1847
1848
1849 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1850 static int __init rcu_nocb_setup(char *str)
1851 {
1852         alloc_bootmem_cpumask_var(&rcu_nocb_mask);
1853         have_rcu_nocb_mask = true;
1854         cpulist_parse(str, rcu_nocb_mask);
1855         return 1;
1856 }
1857 __setup("rcu_nocbs=", rcu_nocb_setup);
1858
1859 static int __init parse_rcu_nocb_poll(char *arg)
1860 {
1861         rcu_nocb_poll = true;
1862         return 0;
1863 }
1864 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
1865
1866 /*
1867  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1868  * grace period.
1869  */
1870 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1871 {
1872         wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
1873 }
1874
1875 /*
1876  * Set the root rcu_node structure's ->need_future_gp field
1877  * based on the sum of those of all rcu_node structures.  This does
1878  * double-count the root rcu_node structure's requests, but this
1879  * is necessary to handle the possibility of a rcu_nocb_kthread()
1880  * having awakened during the time that the rcu_node structures
1881  * were being updated for the end of the previous grace period.
1882  */
1883 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
1884 {
1885         rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
1886 }
1887
1888 static void rcu_init_one_nocb(struct rcu_node *rnp)
1889 {
1890         init_waitqueue_head(&rnp->nocb_gp_wq[0]);
1891         init_waitqueue_head(&rnp->nocb_gp_wq[1]);
1892 }
1893
1894 #ifndef CONFIG_RCU_NOCB_CPU_ALL
1895 /* Is the specified CPU a no-CBs CPU? */
1896 bool rcu_is_nocb_cpu(int cpu)
1897 {
1898         if (have_rcu_nocb_mask)
1899                 return cpumask_test_cpu(cpu, rcu_nocb_mask);
1900         return false;
1901 }
1902 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1903
1904 /*
1905  * Kick the leader kthread for this NOCB group.
1906  */
1907 static void wake_nocb_leader(struct rcu_data *rdp, bool force)
1908 {
1909         struct rcu_data *rdp_leader = rdp->nocb_leader;
1910
1911         if (!READ_ONCE(rdp_leader->nocb_kthread))
1912                 return;
1913         if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
1914                 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
1915                 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
1916                 wake_up(&rdp_leader->nocb_wq);
1917         }
1918 }
1919
1920 /*
1921  * Does the specified CPU need an RCU callback for the specified flavor
1922  * of rcu_barrier()?
1923  */
1924 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
1925 {
1926         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1927         unsigned long ret;
1928 #ifdef CONFIG_PROVE_RCU
1929         struct rcu_head *rhp;
1930 #endif /* #ifdef CONFIG_PROVE_RCU */
1931
1932         /*
1933          * Check count of all no-CBs callbacks awaiting invocation.
1934          * There needs to be a barrier before this function is called,
1935          * but associated with a prior determination that no more
1936          * callbacks would be posted.  In the worst case, the first
1937          * barrier in _rcu_barrier() suffices (but the caller cannot
1938          * necessarily rely on this, and it is not a substitute for the caller
1939          * getting the concurrency design right!).  There must also be
1940          * a barrier between the following load and posting of a callback
1941          * (if a callback is in fact needed).  This is associated with an
1942          * atomic_inc() in the caller.
1943          */
1944         ret = atomic_long_read(&rdp->nocb_q_count);
1945
1946 #ifdef CONFIG_PROVE_RCU
1947         rhp = READ_ONCE(rdp->nocb_head);
1948         if (!rhp)
1949                 rhp = READ_ONCE(rdp->nocb_gp_head);
1950         if (!rhp)
1951                 rhp = READ_ONCE(rdp->nocb_follower_head);
1952
1953         /* Having no rcuo kthread but CBs after scheduler starts is bad! */
1954         if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
1955             rcu_scheduler_fully_active) {
1956                 /* RCU callback enqueued before CPU first came online??? */
1957                 pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
1958                        cpu, rhp->func);
1959                 WARN_ON_ONCE(1);
1960         }
1961 #endif /* #ifdef CONFIG_PROVE_RCU */
1962
1963         return !!ret;
1964 }
1965
1966 /*
1967  * Enqueue the specified string of rcu_head structures onto the specified
1968  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
1969  * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
1970  * counts are supplied by rhcount and rhcount_lazy.
1971  *
1972  * If warranted, also wake up the kthread servicing this CPU's queues.
1973  */
1974 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
1975                                     struct rcu_head *rhp,
1976                                     struct rcu_head **rhtp,
1977                                     int rhcount, int rhcount_lazy,
1978                                     unsigned long flags)
1979 {
1980         int len;
1981         struct rcu_head **old_rhpp;
1982         struct task_struct *t;
1983
1984         /* Enqueue the callback on the nocb list and update counts. */
1985         atomic_long_add(rhcount, &rdp->nocb_q_count);
1986         /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
1987         old_rhpp = xchg(&rdp->nocb_tail, rhtp);
1988         WRITE_ONCE(*old_rhpp, rhp);
1989         atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
1990         smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
1991
1992         /* If we are not being polled and there is a kthread, awaken it ... */
1993         t = READ_ONCE(rdp->nocb_kthread);
1994         if (rcu_nocb_poll || !t) {
1995                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1996                                     TPS("WakeNotPoll"));
1997                 return;
1998         }
1999         len = atomic_long_read(&rdp->nocb_q_count);
2000         if (old_rhpp == &rdp->nocb_head) {
2001                 if (!irqs_disabled_flags(flags)) {
2002                         /* ... if queue was empty ... */
2003                         wake_nocb_leader(rdp, false);
2004                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2005                                             TPS("WakeEmpty"));
2006                 } else {
2007                         rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
2008                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2009                                             TPS("WakeEmptyIsDeferred"));
2010                 }
2011                 rdp->qlen_last_fqs_check = 0;
2012         } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2013                 /* ... or if many callbacks queued. */
2014                 if (!irqs_disabled_flags(flags)) {
2015                         wake_nocb_leader(rdp, true);
2016                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2017                                             TPS("WakeOvf"));
2018                 } else {
2019                         rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
2020                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2021                                             TPS("WakeOvfIsDeferred"));
2022                 }
2023                 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2024         } else {
2025                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
2026         }
2027         return;
2028 }
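
/*
 * Editor's illustrative sketch (not part of this file): the lockless
 * tail-pointer append used above, modelled with C11 atomics in userspace.
 * A producer atomically exchanges the queue's tail pointer-to-pointer and
 * only then stores through the old tail, so a consumer that finds a NULL
 * ->next before reaching the tail must wait briefly, exactly as
 * rcu_nocb_kthread() does in its "WaitQueue" loop below.
 */
#if 0
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct cb {
	struct cb *next;
	int id;
};

struct cb_queue {
	struct cb *head;
	_Atomic(struct cb **) tail;	/* Points at the last ->next (or at head). */
};

/* Append the chain rhp .. *rhtp; analogous to xchg(&rdp->nocb_tail, rhtp). */
static void enqueue_chain(struct cb_queue *q, struct cb *rhp, struct cb **rhtp)
{
	struct cb **old_tail = atomic_exchange(&q->tail, rhtp);

	*old_tail = rhp;		/* Publish the chain to the previous tail. */
}

int main(void)
{
	struct cb a = { .next = NULL, .id = 1 };
	struct cb b = { .next = NULL, .id = 2 };
	struct cb_queue q = { .head = NULL };
	struct cb *p;

	atomic_init(&q.tail, &q.head);
	enqueue_chain(&q, &a, &a.next);
	enqueue_chain(&q, &b, &b.next);
	for (p = q.head; p; p = p->next)
		printf("callback %d\n", p->id);
	return 0;
}
#endif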
2029
2030 /*
2031  * This is a helper for __call_rcu(), which invokes this when the normal
2032  * callback queue is inoperable.  If this is not a no-CBs CPU, this
2033  * function returns failure back to __call_rcu(), which can complain
2034  * appropriately.
2035  *
2036  * Otherwise, this function queues the callback where the corresponding
2037  * "rcuo" kthread can find it.
2038  */
2039 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2040                             bool lazy, unsigned long flags)
2041 {
2042
2043         if (!rcu_is_nocb_cpu(rdp->cpu))
2044                 return false;
2045         __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
2046         if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2047                 trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2048                                          (unsigned long)rhp->func,
2049                                          -atomic_long_read(&rdp->nocb_q_count_lazy),
2050                                          -atomic_long_read(&rdp->nocb_q_count));
2051         else
2052                 trace_rcu_callback(rdp->rsp->name, rhp,
2053                                    -atomic_long_read(&rdp->nocb_q_count_lazy),
2054                                    -atomic_long_read(&rdp->nocb_q_count));
2055
2056         /*
2057          * If called from an extended quiescent state with interrupts
2058          * disabled, invoke the RCU core in order to allow the idle-entry
2059          * deferred-wakeup check to function.
2060          */
2061         if (irqs_disabled_flags(flags) &&
2062             !rcu_is_watching() &&
2063             cpu_online(smp_processor_id()))
2064                 invoke_rcu_core();
2065
2066         return true;
2067 }
2068
2069 /*
2070  * Adopt orphaned callbacks on a no-CBs CPU, or return false if this is
2071  * not a no-CBs CPU.
2072  */
2073 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2074                                                      struct rcu_data *rdp,
2075                                                      unsigned long flags)
2076 {
2077         long ql = rsp->qlen;
2078         long qll = rsp->qlen_lazy;
2079
2080         /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2081         if (!rcu_is_nocb_cpu(smp_processor_id()))
2082                 return false;
2083         rsp->qlen = 0;
2084         rsp->qlen_lazy = 0;
2085
2086         /* First, enqueue the donelist, if any.  This preserves CB ordering. */
2087         if (rsp->orphan_donelist != NULL) {
2088                 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2089                                         rsp->orphan_donetail, ql, qll, flags);
2090                 ql = qll = 0;
2091                 rsp->orphan_donelist = NULL;
2092                 rsp->orphan_donetail = &rsp->orphan_donelist;
2093         }
2094         if (rsp->orphan_nxtlist != NULL) {
2095                 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2096                                         rsp->orphan_nxttail, ql, qll, flags);
2097                 ql = qll = 0;
2098                 rsp->orphan_nxtlist = NULL;
2099                 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2100         }
2101         return true;
2102 }
2103
2104 /*
2105  * If necessary, kick off a new grace period, and either way wait
2106  * for a subsequent grace period to complete.
2107  */
2108 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2109 {
2110         unsigned long c;
2111         bool d;
2112         unsigned long flags;
2113         bool needwake;
2114         struct rcu_node *rnp = rdp->mynode;
2115
2116         raw_spin_lock_irqsave(&rnp->lock, flags);
2117         smp_mb__after_unlock_lock();
2118         needwake = rcu_start_future_gp(rnp, rdp, &c);
2119         raw_spin_unlock_irqrestore(&rnp->lock, flags);
2120         if (needwake)
2121                 rcu_gp_kthread_wake(rdp->rsp);
2122
2123         /*
2124          * Wait for the grace period.  Do so interruptibly to avoid messing
2125          * up the load average.
2126          */
2127         trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
2128         for (;;) {
2129                 wait_event_interruptible(
2130                         rnp->nocb_gp_wq[c & 0x1],
2131                         (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
2132                 if (likely(d))
2133                         break;
2134                 WARN_ON(signal_pending(current));
2135                 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
2136         }
2137         trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
2138         smp_mb(); /* Ensure that CB invocation happens after GP end. */
2139 }
2140
2141 /*
2142  * Leaders come here to wait for additional callbacks to show up.
2143  * This function does not return until callbacks appear.
2144  */
2145 static void nocb_leader_wait(struct rcu_data *my_rdp)
2146 {
2147         bool firsttime = true;
2148         bool gotcbs;
2149         struct rcu_data *rdp;
2150         struct rcu_head **tail;
2151
2152 wait_again:
2153
2154         /* Wait for callbacks to appear. */
2155         if (!rcu_nocb_poll) {
2156                 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
2157                 wait_event_interruptible(my_rdp->nocb_wq,
2158                                 !READ_ONCE(my_rdp->nocb_leader_sleep));
2159                 /* Memory barrier handled by smp_mb() calls below and repoll. */
2160         } else if (firsttime) {
2161                 firsttime = false; /* Don't drown trace log with "Poll"! */
2162                 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
2163         }
2164
2165         /*
2166          * Each pass through the following loop checks a follower for CBs.
2167          * We are our own first follower.  Any CBs found are moved to
2168          * nocb_gp_head, where they await a grace period.
2169          */
2170         gotcbs = false;
2171         for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2172                 rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
2173                 if (!rdp->nocb_gp_head)
2174                         continue;  /* No CBs here, try next follower. */
2175
2176                 /* Move callbacks to wait-for-GP list, which is empty. */
2177                 WRITE_ONCE(rdp->nocb_head, NULL);
2178                 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2179                 gotcbs = true;
2180         }
2181
2182         /*
2183          * If there were no callbacks, sleep a bit, rescan after a
2184          * memory barrier, and go retry.
2185          */
2186         if (unlikely(!gotcbs)) {
2187                 if (!rcu_nocb_poll)
2188                         trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
2189                                             "WokeEmpty");
2190                 WARN_ON(signal_pending(current));
2191                 schedule_timeout_interruptible(1);
2192
2193                 /* Rescan in case we were a victim of memory ordering. */
2194                 my_rdp->nocb_leader_sleep = true;
2195                 smp_mb();  /* Ensure _sleep true before scan. */
2196                 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
2197                         if (READ_ONCE(rdp->nocb_head)) {
2198                                 /* Found CB, so short-circuit next wait. */
2199                                 my_rdp->nocb_leader_sleep = false;
2200                                 break;
2201                         }
2202                 goto wait_again;
2203         }
2204
2205         /* Wait for one grace period. */
2206         rcu_nocb_wait_gp(my_rdp);
2207
2208         /*
2209          * We left ->nocb_leader_sleep unset to reduce cache thrashing.
2210          * We set it now, but recheck for new callbacks while
2211          * traversing our follower list.
2212          */
2213         my_rdp->nocb_leader_sleep = true;
2214         smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
2215
2216         /* Each pass through the following loop wakes a follower, if needed. */
2217         for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2218                 if (READ_ONCE(rdp->nocb_head))
2219                         my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
2220                 if (!rdp->nocb_gp_head)
2221                         continue; /* No CBs, so no need to wake follower. */
2222
2223                 /* Append callbacks to follower's "done" list. */
2224                 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2225                 *tail = rdp->nocb_gp_head;
2226                 smp_mb__after_atomic(); /* Store *tail before wakeup. */
2227                 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2228                         /*
2229                          * List was empty, wake up the follower.
2230                          * Memory barriers supplied by atomic_long_add().
2231                          */
2232                         wake_up(&rdp->nocb_wq);
2233                 }
2234         }
2235
2236         /* If we (the leader) don't have CBs, go wait some more. */
2237         if (!my_rdp->nocb_follower_head)
2238                 goto wait_again;
2239 }
2240
2241 /*
2242  * Followers come here to wait for additional callbacks to show up.
2243  * This function does not return until callbacks appear.
2244  */
2245 static void nocb_follower_wait(struct rcu_data *rdp)
2246 {
2247         bool firsttime = true;
2248
2249         for (;;) {
2250                 if (!rcu_nocb_poll) {
2251                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2252                                             "FollowerSleep");
2253                         wait_event_interruptible(rdp->nocb_wq,
2254                                                  READ_ONCE(rdp->nocb_follower_head));
2255                 } else if (firsttime) {
2256                         /* Don't drown trace log with "Poll"! */
2257                         firsttime = false;
2258                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
2259                 }
2260                 if (smp_load_acquire(&rdp->nocb_follower_head)) {
2261                         /* ^^^ Ensure CB invocation follows _head test. */
2262                         return;
2263                 }
2264                 if (!rcu_nocb_poll)
2265                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2266                                             "WokeEmpty");
2267                 WARN_ON(signal_pending(current));
2268                 schedule_timeout_interruptible(1);
2269         }
2270 }
2271
2272 /*
2273  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2274  * callbacks queued by the corresponding no-CBs CPU; however, there is
2275  * an optional leader-follower relationship so that the grace-period
2276  * kthreads don't have to do quite so many wakeups.
2277  */
2278 static int rcu_nocb_kthread(void *arg)
2279 {
2280         int c, cl;
2281         struct rcu_head *list;
2282         struct rcu_head *next;
2283         struct rcu_head **tail;
2284         struct rcu_data *rdp = arg;
2285
2286         /* Each pass through this loop invokes one batch of callbacks */
2287         for (;;) {
2288                 /* Wait for callbacks. */
2289                 if (rdp->nocb_leader == rdp)
2290                         nocb_leader_wait(rdp);
2291                 else
2292                         nocb_follower_wait(rdp);
2293
2294                 /* Pull the ready-to-invoke callbacks onto local list. */
2295                 list = READ_ONCE(rdp->nocb_follower_head);
2296                 BUG_ON(!list);
2297                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2298                 WRITE_ONCE(rdp->nocb_follower_head, NULL);
2299                 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2300
2301                 /* Each pass through the following loop invokes a callback. */
2302                 trace_rcu_batch_start(rdp->rsp->name,
2303                                       atomic_long_read(&rdp->nocb_q_count_lazy),
2304                                       atomic_long_read(&rdp->nocb_q_count), -1);
2305                 c = cl = 0;
2306                 while (list) {
2307                         next = list->next;
2308                         /* Wait for enqueuing to complete, if needed. */
2309                         while (next == NULL && &list->next != tail) {
2310                                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2311                                                     TPS("WaitQueue"));
2312                                 schedule_timeout_interruptible(1);
2313                                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2314                                                     TPS("WokeQueue"));
2315                                 next = list->next;
2316                         }
2317                         debug_rcu_head_unqueue(list);
2318                         local_bh_disable();
2319                         if (__rcu_reclaim(rdp->rsp->name, list))
2320                                 cl++;
2321                         c++;
2322                         local_bh_enable();
2323                         list = next;
2324                 }
2325                 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2326                 smp_mb__before_atomic();  /* _add after CB invocation. */
2327                 atomic_long_add(-c, &rdp->nocb_q_count);
2328                 atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2329                 rdp->n_nocbs_invoked += c;
2330         }
2331         return 0;
2332 }
2333
2334 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
2335 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2336 {
2337         return READ_ONCE(rdp->nocb_defer_wakeup);
2338 }
2339
2340 /* Do a deferred wakeup of rcu_nocb_kthread(). */
2341 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2342 {
2343         int ndw;
2344
2345         if (!rcu_nocb_need_deferred_wakeup(rdp))
2346                 return;
2347         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
2348         WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
2349         wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
2350         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
2351 }
2352
2353 void __init rcu_init_nohz(void)
2354 {
2355         int cpu;
2356         bool need_rcu_nocb_mask = true;
2357         struct rcu_state *rsp;
2358
2359 #ifdef CONFIG_RCU_NOCB_CPU_NONE
2360         need_rcu_nocb_mask = false;
2361 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */
2362
2363 #if defined(CONFIG_NO_HZ_FULL)
2364         if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
2365                 need_rcu_nocb_mask = true;
2366 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2367
2368         if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
2369                 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
2370                         pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
2371                         return;
2372                 }
2373                 have_rcu_nocb_mask = true;
2374         }
2375         if (!have_rcu_nocb_mask)
2376                 return;
2377
2378 #ifdef CONFIG_RCU_NOCB_CPU_ZERO
2379         pr_info("\tOffload RCU callbacks from CPU 0\n");
2380         cpumask_set_cpu(0, rcu_nocb_mask);
2381 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
2382 #ifdef CONFIG_RCU_NOCB_CPU_ALL
2383         pr_info("\tOffload RCU callbacks from all CPUs\n");
2384         cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
2385 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
2386 #if defined(CONFIG_NO_HZ_FULL)
2387         if (tick_nohz_full_running)
2388                 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2389 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2390
2391         if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
2392                 pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
2393                 cpumask_and(rcu_nocb_mask, cpu_possible_mask,
2394                             rcu_nocb_mask);
2395         }
2396         pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
2397                 cpumask_pr_args(rcu_nocb_mask));
2398         if (rcu_nocb_poll)
2399                 pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
2400
2401         for_each_rcu_flavor(rsp) {
2402                 for_each_cpu(cpu, rcu_nocb_mask)
2403                         init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
2404                 rcu_organize_nocb_kthreads(rsp);
2405         }
2406 }
2407
2408 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2409 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2410 {
2411         rdp->nocb_tail = &rdp->nocb_head;
2412         init_waitqueue_head(&rdp->nocb_wq);
2413         rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2414 }
2415
2416 /*
2417  * If the specified CPU is a no-CBs CPU that does not already have its
2418  * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
2419  * brought online out of order, this can require re-organizing the
2420  * leader-follower relationships.
2421  */
2422 static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
2423 {
2424         struct rcu_data *rdp;
2425         struct rcu_data *rdp_last;
2426         struct rcu_data *rdp_old_leader;
2427         struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
2428         struct task_struct *t;
2429
2430         /*
2431          * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
2432          * then nothing to do.
2433          */
2434         if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
2435                 return;
2436
2437         /* If we didn't spawn the leader first, reorganize! */
2438         rdp_old_leader = rdp_spawn->nocb_leader;
2439         if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
2440                 rdp_last = NULL;
2441                 rdp = rdp_old_leader;
2442                 do {
2443                         rdp->nocb_leader = rdp_spawn;
2444                         if (rdp_last && rdp != rdp_spawn)
2445                                 rdp_last->nocb_next_follower = rdp;
2446                         if (rdp == rdp_spawn) {
2447                                 rdp = rdp->nocb_next_follower;
2448                         } else {
2449                                 rdp_last = rdp;
2450                                 rdp = rdp->nocb_next_follower;
2451                                 rdp_last->nocb_next_follower = NULL;
2452                         }
2453                 } while (rdp);
2454                 rdp_spawn->nocb_next_follower = rdp_old_leader;
2455         }
2456
2457         /* Spawn the kthread for this CPU and RCU flavor. */
2458         t = kthread_run(rcu_nocb_kthread, rdp_spawn,
2459                         "rcuo%c/%d", rsp->abbr, cpu);
2460         BUG_ON(IS_ERR(t));
2461         WRITE_ONCE(rdp_spawn->nocb_kthread, t);
2462 }
2463
2464 /*
2465  * If the specified CPU is a no-CBs CPU that does not already have its
2466  * rcuo kthreads, spawn them.
2467  */
2468 static void rcu_spawn_all_nocb_kthreads(int cpu)
2469 {
2470         struct rcu_state *rsp;
2471
2472         if (rcu_scheduler_fully_active)
2473                 for_each_rcu_flavor(rsp)
2474                         rcu_spawn_one_nocb_kthread(rsp, cpu);
2475 }
2476
2477 /*
2478  * Once the scheduler is running, spawn rcuo kthreads for all online
2479  * no-CBs CPUs.  This assumes that the early_initcall()s happen before
2480  * non-boot CPUs come online -- if this changes, we will need to add
2481  * some mutual exclusion.
2482  */
2483 static void __init rcu_spawn_nocb_kthreads(void)
2484 {
2485         int cpu;
2486
2487         for_each_online_cpu(cpu)
2488                 rcu_spawn_all_nocb_kthreads(cpu);
2489 }
2490
2491 /* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
2492 static int rcu_nocb_leader_stride = -1;
2493 module_param(rcu_nocb_leader_stride, int, 0444);
2494
2495 /*
2496  * Initialize leader-follower relationships for all no-CBs CPUs.
2497  */
2498 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
2499 {
2500         int cpu;
2501         int ls = rcu_nocb_leader_stride;
2502         int nl = 0;  /* Next leader. */
2503         struct rcu_data *rdp;
2504         struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
2505         struct rcu_data *rdp_prev = NULL;
2506
2507         if (!have_rcu_nocb_mask)
2508                 return;
2509         if (ls == -1) {
2510                 ls = int_sqrt(nr_cpu_ids);
2511                 rcu_nocb_leader_stride = ls;
2512         }
2513
2514         /*
2515          * Each pass through this loop sets up one rcu_data structure and
2516          * spawns one rcu_nocb_kthread().
2517          */
2518         for_each_cpu(cpu, rcu_nocb_mask) {
2519                 rdp = per_cpu_ptr(rsp->rda, cpu);
2520                 if (rdp->cpu >= nl) {
2521                         /* New leader, set up for followers & next leader. */
2522                         nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2523                         rdp->nocb_leader = rdp;
2524                         rdp_leader = rdp;
2525                 } else {
2526                         /* Another follower, link to previous leader. */
2527                         rdp->nocb_leader = rdp_leader;
2528                         rdp_prev->nocb_next_follower = rdp;
2529                 }
2530                 rdp_prev = rdp;
2531         }
2532 }
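
/*
 * Editor's worked example (not part of this file): with nr_cpu_ids = 64 and
 * the default rcu_nocb_leader_stride of -1, ls = int_sqrt(64) = 8.  Walking
 * rcu_nocb_mask in CPU order, the "rdp->cpu >= nl" test above then makes
 * CPUs 0, 8, 16, ... the leaders of their own groups:
 *
 *	CPU 0:  0 >= 0,  leader;   nl = DIV_ROUND_UP(1, 8) * 8 = 8
 *	CPU 1:  1 <  8,  follower of CPU 0
 *	  ...
 *	CPU 7:  7 <  8,  follower of CPU 0
 *	CPU 8:  8 >= 8,  leader;   nl = DIV_ROUND_UP(9, 8) * 8 = 16
 *
 * If the mask is sparse, the first masked CPU at or beyond the next stride
 * boundary simply becomes the leader of a new group.
 */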
2533
2534 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs. */
2535 static bool init_nocb_callback_list(struct rcu_data *rdp)
2536 {
2537         if (!rcu_is_nocb_cpu(rdp->cpu))
2538                 return false;
2539
2540         /* If there are early-boot callbacks, move them to nocb lists. */
2541         if (rdp->nxtlist) {
2542                 rdp->nocb_head = rdp->nxtlist;
2543                 rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
2544                 atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
2545                 atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
2546                 rdp->nxtlist = NULL;
2547                 rdp->qlen = 0;
2548                 rdp->qlen_lazy = 0;
2549         }
2550         rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2551         return true;
2552 }
2553
2554 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2555
2556 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2557 {
2558         WARN_ON_ONCE(1); /* Should be dead code. */
2559         return false;
2560 }
2561
2562 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2563 {
2564 }
2565
2566 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2567 {
2568 }
2569
2570 static void rcu_init_one_nocb(struct rcu_node *rnp)
2571 {
2572 }
2573
2574 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2575                             bool lazy, unsigned long flags)
2576 {
2577         return false;
2578 }
2579
2580 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2581                                                      struct rcu_data *rdp,
2582                                                      unsigned long flags)
2583 {
2584         return false;
2585 }
2586
2587 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2588 {
2589 }
2590
2591 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2592 {
2593         return false;
2594 }
2595
2596 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2597 {
2598 }
2599
2600 static void rcu_spawn_all_nocb_kthreads(int cpu)
2601 {
2602 }
2603
2604 static void __init rcu_spawn_nocb_kthreads(void)
2605 {
2606 }
2607
2608 static bool init_nocb_callback_list(struct rcu_data *rdp)
2609 {
2610         return false;
2611 }
2612
2613 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2614
2615 /*
2616  * An adaptive-ticks CPU can potentially execute in kernel mode for an
2617  * arbitrarily long period of time with the scheduling-clock tick turned
2618  * off.  RCU will be paying attention to this CPU because it is in the
2619  * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2620  * machine because the scheduling-clock tick has been disabled.  Therefore,
2621  * if an adaptive-ticks CPU is failing to respond to the current grace
2622  * period and has not been idle from an RCU perspective, kick it.
2623  */
2624 static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2625 {
2626 #ifdef CONFIG_NO_HZ_FULL
2627         if (tick_nohz_full_cpu(cpu))
2628                 smp_send_reschedule(cpu);
2629 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2630 }
2631
2632
2633 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
2634
2635 static int full_sysidle_state;          /* Current system-idle state. */
2636 #define RCU_SYSIDLE_NOT         0       /* Some CPU is not idle. */
2637 #define RCU_SYSIDLE_SHORT       1       /* All CPUs idle for brief period. */
2638 #define RCU_SYSIDLE_LONG        2       /* All CPUs idle for long enough. */
2639 #define RCU_SYSIDLE_FULL        3       /* All CPUs idle, ready for sysidle. */
2640 #define RCU_SYSIDLE_FULL_NOTED  4       /* Actually entered sysidle state. */
2641
2642 /*
2643  * Invoked to note exit from irq or task transition to idle.  Note that
2644  * usermode execution does -not- count as idle here!  After all, we want
2645  * to detect full-system idle states, not RCU quiescent states and grace
2646  * periods.  The caller must have disabled interrupts.
2647  */
2648 static void rcu_sysidle_enter(int irq)
2649 {
2650         unsigned long j;
2651         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
2652
2653         /* If there are no nohz_full= CPUs, no need to track this. */
2654         if (!tick_nohz_full_enabled())
2655                 return;
2656
2657         /* Adjust nesting, check for fully idle. */
2658         if (irq) {
2659                 rdtp->dynticks_idle_nesting--;
2660                 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2661                 if (rdtp->dynticks_idle_nesting != 0)
2662                         return;  /* Still not fully idle. */
2663         } else {
2664                 if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
2665                     DYNTICK_TASK_NEST_VALUE) {
2666                         rdtp->dynticks_idle_nesting = 0;
2667                 } else {
2668                         rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
2669                         WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2670                         return;  /* Still not fully idle. */
2671                 }
2672         }
2673
2674         /* Record start of fully idle period. */
2675         j = jiffies;
2676         WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
2677         smp_mb__before_atomic();
2678         atomic_inc(&rdtp->dynticks_idle);
2679         smp_mb__after_atomic();
2680         WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
2681 }
2682
2683 /*
2684  * Unconditionally force exit from full system-idle state.  This is
2685  * invoked when a normal CPU exits idle, but must be called separately
2686  * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
2687  * is that the timekeeping CPU is permitted to take scheduling-clock
2688  * interrupts while the system is in system-idle state, and of course
2689  * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
2690  * interrupt from any other type of interrupt.
2691  */
2692 void rcu_sysidle_force_exit(void)
2693 {
2694         int oldstate = READ_ONCE(full_sysidle_state);
2695         int newoldstate;
2696
2697         /*
2698          * Each pass through the following loop attempts to exit full
2699          * system-idle state.  If contention proves to be a problem,
2700          * a trylock-based contention tree could be used here.
2701          */
2702         while (oldstate > RCU_SYSIDLE_SHORT) {
2703                 newoldstate = cmpxchg(&full_sysidle_state,
2704                                       oldstate, RCU_SYSIDLE_NOT);
2705                 if (oldstate == newoldstate &&
2706                     oldstate == RCU_SYSIDLE_FULL_NOTED) {
2707                         rcu_kick_nohz_cpu(tick_do_timer_cpu);
2708                         return; /* We cleared it, done! */
2709                 }
2710                 oldstate = newoldstate;
2711         }
2712         smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
2713 }
2714
2715 /*
2716  * Invoked to note entry to irq or task transition from idle.  Note that
2717  * usermode execution does -not- count as idle here!  The caller must
2718  * have disabled interrupts.
2719  */
2720 static void rcu_sysidle_exit(int irq)
2721 {
2722         struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
2723
2724         /* If there are no nohz_full= CPUs, no need to track this. */
2725         if (!tick_nohz_full_enabled())
2726                 return;
2727
2728         /* Adjust nesting, check for already non-idle. */
2729         if (irq) {
2730                 rdtp->dynticks_idle_nesting++;
2731                 WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2732                 if (rdtp->dynticks_idle_nesting != 1)
2733                         return; /* Already non-idle. */
2734         } else {
2735                 /*
2736                  * Allow for irq misnesting.  Yes, it really is possible
2737                  * to enter an irq handler then never leave it, and maybe
2738                  * also vice versa.  Handle both possibilities.
2739                  */
2740                 if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
2741                         rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
2742                         WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2743                         return; /* Already non-idle. */
2744                 } else {
2745                         rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
2746                 }
2747         }
2748
2749         /* Record end of idle period. */
2750         smp_mb__before_atomic();
2751         atomic_inc(&rdtp->dynticks_idle);
2752         smp_mb__after_atomic();
2753         WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
2754
2755         /*
2756          * If we are the timekeeping CPU, we are permitted to be non-idle
2757          * during a system-idle state.  This must be the case, because
2758          * the timekeeping CPU has to take scheduling-clock interrupts
2759          * during the time that the system is transitioning to full
2760          * system-idle state.  This means that the timekeeping CPU must
2761          * invoke rcu_sysidle_force_exit() directly if it does anything
2762          * more than take a scheduling-clock interrupt.
2763          */
2764         if (smp_processor_id() == tick_do_timer_cpu)
2765                 return;
2766
2767         /* Update system-idle state: We are clearly no longer fully idle! */
2768         rcu_sysidle_force_exit();
2769 }
2770
2771 /*
2772  * Check to see if the current CPU is idle.  Note that usermode execution
2773  * does not count as idle.  The caller must have disabled interrupts,
2774  * and must be running on tick_do_timer_cpu.
2775  */
2776 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2777                                   unsigned long *maxj)
2778 {
2779         int cur;
2780         unsigned long j;
2781         struct rcu_dynticks *rdtp = rdp->dynticks;
2782
2783         /* If there are no nohz_full= CPUs, don't check system-wide idleness. */
2784         if (!tick_nohz_full_enabled())
2785                 return;
2786
2787         /*
2788          * If some other CPU has already reported non-idle, if this is
2789          * not the flavor of RCU that tracks sysidle state, or if this
2790          * is an offline or the timekeeping CPU, nothing to do.
2791          */
2792         if (!*isidle || rdp->rsp != rcu_state_p ||
2793             cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
2794                 return;
2795         /* Verify affinity of current kthread. */
2796         WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
2797
2798         /* Pick up current idle and NMI-nesting counter and check. */
2799         cur = atomic_read(&rdtp->dynticks_idle);
2800         if (cur & 0x1) {
2801                 *isidle = false; /* We are not idle! */
2802                 return;
2803         }
2804         smp_mb(); /* Read counters before timestamps. */
2805
2806         /* Pick up timestamps. */
2807         j = READ_ONCE(rdtp->dynticks_idle_jiffies);
2808         /* If this CPU entered idle more recently, update maxj timestamp. */
2809         if (ULONG_CMP_LT(*maxj, j))
2810                 *maxj = j;
2811 }
2812
2813 /*
2814  * Is this the flavor of RCU that is handling full-system idle?
2815  */
2816 static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2817 {
2818         return rsp == rcu_state_p;
2819 }
2820
2821 /*
2822  * Return a delay in jiffies based on the number of CPUs, rcu_node
2823  * leaf fanout, and jiffies tick rate.  The idea is to allow larger
2824  * systems more time to transition to full-idle state in order to
2825  * avoid the cache thrashing that would otherwise occur on the state variable.
2826  * Really small systems (fewer than a couple of tens of CPUs) should
2827  * instead use a single global atomically incremented counter, and later
2828  * versions of this will automatically reconfigure themselves accordingly.
2829  */
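/*
 * For example (illustrative numbers only, not Kconfig defaults, and assuming
 * nr_cpu_ids exceeds CONFIG_NO_HZ_FULL_SYSIDLE_SMALL): with nr_cpu_ids = 256,
 * rcu_fanout_leaf = 16, and HZ = 1000, the delay below works out to
 * DIV_ROUND_UP(256 * 1000, 16 * 1000) = 16 jiffies.
 */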
2830 static unsigned long rcu_sysidle_delay(void)
2831 {
2832         if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2833                 return 0;
2834         return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
2835 }
2836
2837 /*
2838  * Advance the full-system-idle state.  This is invoked when all of
2839  * the non-timekeeping CPUs are idle.
2840  */
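/*
 * Overall progression (the final _FULL -> _FULL_NOTED step is taken by
 * rcu_sys_is_idle() below, and rcu_sysidle_cancel() kicks any state past
 * _SHORT back to _NOT):
 *
 *	RCU_SYSIDLE_NOT -> RCU_SYSIDLE_SHORT -> RCU_SYSIDLE_LONG ->
 *	RCU_SYSIDLE_FULL -> RCU_SYSIDLE_FULL_NOTED
 */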
2841 static void rcu_sysidle(unsigned long j)
2842 {
2843         /* Check the current state. */
2844         switch (READ_ONCE(full_sysidle_state)) {
2845         case RCU_SYSIDLE_NOT:
2846
2847                 /* First time all are idle, so note a short idle period. */
2848                 WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT);
2849                 break;
2850
2851         case RCU_SYSIDLE_SHORT:
2852
2853                 /*
2854                  * Idle for a bit, time to advance to next state?
2855                  * cmpxchg failure means race with non-idle, let them win.
2856                  */
2857                 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2858                         (void)cmpxchg(&full_sysidle_state,
2859                                       RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
2860                 break;
2861
2862         case RCU_SYSIDLE_LONG:
2863
2864                 /*
2865                  * Do an additional check pass before advancing to full.
2866                  * cmpxchg failure means race with non-idle, let them win.
2867                  */
2868                 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2869                         (void)cmpxchg(&full_sysidle_state,
2870                                       RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
2871                 break;
2872
2873         default:
2874                 break;
2875         }
2876 }
2877
2878 /*
2879  * Found a non-idle non-timekeeping CPU, so kick the system-idle state
2880  * back to the beginning.
2881  */
2882 static void rcu_sysidle_cancel(void)
2883 {
2884         smp_mb(); /* Order prior accesses before the state check and reset below. */
2885         if (full_sysidle_state > RCU_SYSIDLE_SHORT)
2886                 WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT);
2887 }
2888
2889 /*
2890  * Update the sysidle state based on the results of a force-quiescent-state
2891  * scan of the CPUs' dyntick-idle state.
2892  */
2893 static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
2894                                unsigned long maxj, bool gpkt)
2895 {
2896         if (rsp != rcu_state_p)
2897                 return;  /* Wrong flavor, ignore. */
2898         if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2899                 return;  /* Small system: state machine is run from the timekeeping CPU. */
2900         if (isidle)
2901                 rcu_sysidle(maxj);    /* More idle! */
2902         else
2903                 rcu_sysidle_cancel(); /* Idle is over. */
2904 }
2905
2906 /*
2907  * Wrapper for rcu_sysidle_report() when called from the grace-period
2908  * kthread's context.
2909  */
2910 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
2911                                   unsigned long maxj)
2912 {
2913         /* If there are no nohz_full= CPUs, no need to track this. */
2914         if (!tick_nohz_full_enabled())
2915                 return;
2916
2917         rcu_sysidle_report(rsp, isidle, maxj, true);
2918 }
2919
2920 /* Callback and function for forcing an RCU grace period. */
2921 struct rcu_sysidle_head {
2922         struct rcu_head rh;
2923         int inuse;
2924 };
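/*
 * The ->inuse flag, together with the xchg() in rcu_sys_is_idle(), ensures
 * that the statically allocated rcu_sysidle_head in that function is never
 * handed to call_rcu() while a previous invocation's callback is pending.
 */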
2925
2926 static void rcu_sysidle_cb(struct rcu_head *rhp)
2927 {
2928         struct rcu_sysidle_head *rshp;
2929
2930         /*
2931          * The following memory barrier is needed to replace the
2932          * memory barriers that would normally be in the memory
2933          * allocator.
2934          */
2935         smp_mb();  /* grace period precedes setting inuse. */
2936
2937         rshp = container_of(rhp, struct rcu_sysidle_head, rh);
2938         WRITE_ONCE(rshp->inuse, 0);
2939 }
2940
2941 /*
2942  * Check to see if the system is fully idle, other than the timekeeping CPU.
2943  * The caller must have disabled interrupts.  This is not intended to be
2944  * called unless tick_nohz_full_enabled().
2945  */
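/*
 * A minimal caller sketch (hypothetical; the real call site is in the
 * timekeeping code and must run on tick_do_timer_cpu):
 *
 *	unsigned long flags;
 *	bool sysidle;
 *
 *	local_irq_save(flags);
 *	sysidle = tick_nohz_full_enabled() && rcu_sys_is_idle();
 *	local_irq_restore(flags);
 */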
2946 bool rcu_sys_is_idle(void)
2947 {
2948         static struct rcu_sysidle_head rsh;
2949         int rss = READ_ONCE(full_sysidle_state);
2950
2951         if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
2952                 return false;
2953
2954         /* Handle small-system case by doing a full scan of CPUs. */
2955         if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
2956                 int oldrss = rss - 1;
2957
2958                 /*
2959                  * One pass to advance to each state up to _FULL.
2960                  * Give up if any pass fails to advance the state.
2961                  */
2962                 while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
2963                         int cpu;
2964                         bool isidle = true;
2965                         unsigned long maxj = jiffies - ULONG_MAX / 4; /* Far in the past. */
2966                         struct rcu_data *rdp;
2967
2968                         /* Scan all the CPUs looking for nonidle CPUs. */
2969                         for_each_possible_cpu(cpu) {
2970                                 rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
2971                                 rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
2972                                 if (!isidle)
2973                                         break;
2974                         }
2975                         rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
2976                         oldrss = rss;
2977                         rss = READ_ONCE(full_sysidle_state);
2978                 }
2979         }
2980
2981         /* If this is the first observation of an idle period, record it. */
2982         if (rss == RCU_SYSIDLE_FULL) {
2983                 rss = cmpxchg(&full_sysidle_state,
2984                               RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
2985                 return rss == RCU_SYSIDLE_FULL;
2986         }
2987
2988         smp_mb(); /* ensure rss load happens before later caller actions. */
2989
2990         /* If already fully idle, tell the caller (in case of races). */
2991         if (rss == RCU_SYSIDLE_FULL_NOTED)
2992                 return true;
2993
2994         /*
2995          * If we aren't there yet, and a grace period is not in flight,
2996          * initiate a grace period.  Either way, tell the caller that
2997          * we are not there yet.  We use an xchg() rather than an assignment
2998          * to make up for the memory barriers that would otherwise be
2999          * provided by the memory allocator.
3000          */
3001         if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
3002             !rcu_gp_in_progress(rcu_state_p) &&
3003             !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
3004                 call_rcu(&rsh.rh, rcu_sysidle_cb);
3005         return false;
3006 }
3007
3008 /*
3009  * Initialize dynticks sysidle state for CPUs coming online.
3010  */
3011 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
3012 {
3013         rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE; /* CPUs come online non-idle. */
3014 }
3015
3016 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3017
3018 static void rcu_sysidle_enter(int irq)
3019 {
3020 }
3021
3022 static void rcu_sysidle_exit(int irq)
3023 {
3024 }
3025
3026 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
3027                                   unsigned long *maxj)
3028 {
3029 }
3030
3031 static bool is_sysidle_rcu_state(struct rcu_state *rsp)
3032 {
3033         return false;
3034 }
3035
3036 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
3037                                   unsigned long maxj)
3038 {
3039 }
3040
3041 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
3042 {
3043 }
3044
3045 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3046
3047 /*
3048  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
3049  * grace-period kthread will do force_quiescent_state() processing?
3050  * The idea is to avoid waking up RCU core processing on such a
3051  * CPU unless the grace period has extended for too long.
3052  *
3053  * This code relies on the fact that all NO_HZ_FULL CPUs are also
3054  * CONFIG_RCU_NOCB_CPU CPUs.
3055  */
3056 static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
3057 {
3058 #ifdef CONFIG_NO_HZ_FULL
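        /*
         * Leave a nohz_full CPU alone if no grace period is in progress,
         * or if the current grace period is less than about one second
         * (HZ jiffies) old.
         */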
3059         if (tick_nohz_full_cpu(smp_processor_id()) &&
3060             (!rcu_gp_in_progress(rsp) ||
3061              ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
3062                 return true;
3063 #endif /* #ifdef CONFIG_NO_HZ_FULL */
3064         return false;
3065 }
3066
3067 /*
3068  * Bind the grace-period kthread for the sysidle flavor of RCU to the
3069  * timekeeping CPU, or to the housekeeping CPUs if CONFIG_NO_HZ_FULL_SYSIDLE=n.
3070  */
3071 static void rcu_bind_gp_kthread(void)
3072 {
3073         int __maybe_unused cpu;
3074
3075         if (!tick_nohz_full_enabled())
3076                 return;
3077 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
3078         cpu = tick_do_timer_cpu;
3079         if (cpu >= 0 && cpu < nr_cpu_ids)
3080                 set_cpus_allowed_ptr(current, cpumask_of(cpu));
3081 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3082         housekeeping_affine(current);
3083 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3084 }
3085
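/*
 * The ->rcu_tasks_idle_cpu field recorded below is used by the RCU-tasks
 * grace-period machinery to identify tasks that are in extended quiescent
 * states on nohz_full CPUs.
 */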
3086 /* Record the current task on dyntick-idle entry. */
3087 static void rcu_dynticks_task_enter(void)
3088 {
3089 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
3090         WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
3091 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
3092 }
3093
3094 /* Record no current task on dyntick-idle exit. */
3095 static void rcu_dynticks_task_exit(void)
3096 {
3097 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
3098         WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
3099 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
3100 }