Mirror of https://github.com/lkl/linux.git (synced 2025-12-19 16:13:19 +09:00)
Merge tag 'rcu.2022.03.13a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull RCU updates from Paul McKenney:

 - Fix idle detection (Neeraj Upadhyay) and missing access marking
   detected by KCSAN.

 - Reduce coupling between rcu_barrier() and CPU-hotplug operations, so
   that rcu_barrier() no longer needs to do cpus_read_lock(). This may
   also someday allow system boot to bring CPUs online concurrently.

 - Enable more aggressive movement to per-CPU queueing when reacting to
   excessive lock contention due to workloads placing heavy update-side
   stress on RCU tasks.

 - Improvements to RCU priority boosting, including changes from Neeraj
   Upadhyay, Zqiang, and Alison Chaiken.

 - Various fixes improving test robustness and debug information.

 - Add tests for SRCU size transitions, further compress torture.sh
   build products, and improve debug output.

 - Miscellaneous fixes.

* tag 'rcu.2022.03.13a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (49 commits)
  rcu: Replace cpumask_weight with cpumask_empty where appropriate
  rcu: Remove __read_mostly annotations from rcu_scheduler_active externs
  rcu: Uninline multi-use function: finish_rcuwait()
  rcu: Mark writes to the rcu_segcblist structure's ->flags field
  kasan: Record work creation stack trace with interrupts enabled
  rcu: Inline __call_rcu() into call_rcu()
  rcu: Add mutex for rcu boost kthread spawning and affinity setting
  rcu: Fix description of kvfree_rcu()
  MAINTAINERS: Add Frederic and Neeraj to their RCU files
  rcutorture: Provide non-power-of-two Tasks RCU scenarios
  rcutorture: Test SRCU size transitions
  torture: Make torture.sh help message match reality
  rcu-tasks: Set ->percpu_enqueue_shift to zero upon contention
  rcu-tasks: Use order_base_2() instead of ilog2()
  rcu: Create and use an rcu_rdp_cpu_online()
  rcu: Make rcu_barrier() no longer block CPU-hotplug operations
  rcu: Rework rcu_barrier() and callback-migration logic
  rcu: Refactor rcu_barrier() empty-list handling
  rcu: Kill rnp->ofl_seq and use only rcu_state.ofl_lock for exclusion
  torture: Change KVM environment variable to RCUTORTURE
  ...
@@ -4504,6 +4504,8 @@
 (the least-favored priority). Otherwise, when
 RCU_BOOST is not set, valid values are 0-99 and
 the default is zero (non-realtime operation).
+When RCU_NOCB_CPU is set, also adjust the
+priority of NOCB callback kthreads.

 rcutree.rcu_nocb_gp_stride= [KNL]
 Set the number of NOCB callback kthreads in
@@ -16324,6 +16324,8 @@ F: tools/testing/selftests/resctrl/

 READ-COPY UPDATE (RCU)
 M: "Paul E. McKenney" <paulmck@kernel.org>
+M: Frederic Weisbecker <frederic@kernel.org> (kernel/rcu/tree_nocb.h)
+M: Neeraj Upadhyay <quic_neeraju@quicinc.com> (kernel/rcu/tasks.h)
 M: Josh Triplett <josh@joshtriplett.org>
 R: Steven Rostedt <rostedt@goodmis.org>
 R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@@ -84,7 +84,7 @@ static inline int rcu_preempt_depth(void)

 /* Internal to kernel */
 void rcu_init(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
 void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
@@ -924,7 +924,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 *
 * kvfree_rcu(ptr);
 *
- * where @ptr is a pointer to kvfree().
+ * where @ptr is the pointer to be freed by kvfree().
 *
 * Please note, head-less way of freeing is permitted to
 * use from a context that has to follow might_sleep()
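The hunk above documents the head-less kvfree_rcu(ptr) form. A minimal usage sketch, not taken from this patch ('struct foo', 'foo_slot' and 'foo_remove()' are made-up names); the single-argument form may block until a grace period elapses, so the caller must be allowed to sleep:

struct foo {
	int data;
};

static struct foo __rcu *foo_slot;

static void foo_remove(void)
{
	struct foo *p;

	/* Unpublish the object so new readers can no longer find it... */
	p = rcu_replace_pointer(foo_slot, NULL, true);
	if (p)
		kvfree_rcu(p);	/* ...then free it; may sleep waiting for a grace period. */
}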
@@ -62,7 +62,7 @@ static inline void rcu_irq_exit_check_preempt(void) { }
 void exit_rcu(void);

 void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
 void rcu_end_inkernel_boot(void);
 bool rcu_inkernel_boot_has_ended(void);
 bool rcu_is_watching(void);
@@ -47,11 +47,7 @@ static inline void prepare_to_rcuwait(struct rcuwait *w)
 rcu_assign_pointer(w->task, current);
 }

-static inline void finish_rcuwait(struct rcuwait *w)
-{
-rcu_assign_pointer(w->task, NULL);
-__set_current_state(TASK_RUNNING);
-}
+extern void finish_rcuwait(struct rcuwait *w);

 #define rcuwait_wait_event(w, condition, state) \
 ({ \
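finish_rcuwait() above becomes out-of-line; the surrounding rcuwait API is unchanged. A hedged sketch of typical usage ('my_wait' and 'my_done' are hypothetical names; rcuwait_wait_event() invokes prepare_to_rcuwait() and finish_rcuwait() internally):

static struct rcuwait my_wait;
static bool my_done;

static void consumer(void)
{
	rcuwait_init(&my_wait);
	/* Sleep until the condition becomes true. */
	rcuwait_wait_event(&my_wait, READ_ONCE(my_done), TASK_UNINTERRUPTIBLE);
}

static void producer(void)
{
	WRITE_ONCE(my_done, true);
	rcuwait_wake_up(&my_wait);	/* RCU protects the waiter's task_struct here */
}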
@@ -794,16 +794,15 @@ TRACE_EVENT_RCU(rcu_torture_read,
 * Tracepoint for rcu_barrier() execution. The string "s" describes
 * the rcu_barrier phase:
 * "Begin": rcu_barrier() started.
+* "CB": An rcu_barrier_callback() invoked a callback, not the last.
 * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
 * "Inc1": rcu_barrier() piggyback check counter incremented.
-* "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks.
-* "OnlineQ": rcu_barrier() found online CPU with callbacks.
-* "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
+* "Inc2": rcu_barrier() piggyback check counter incremented.
 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 * "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
-* "CB": An rcu_barrier_callback() invoked a callback, not the last.
 * "LastCB": An rcu_barrier_callback() invoked the last callback.
-* "Inc2": rcu_barrier() piggyback check counter incremented.
+* "NQ": rcu_barrier() found a CPU with no callbacks.
+* "OnlineQ": rcu_barrier() found online CPU with callbacks.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
@@ -56,13 +56,13 @@ static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
 static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp,
 int flags)
 {
-rsclp->flags |= flags;
+WRITE_ONCE(rsclp->flags, rsclp->flags | flags);
 }

 static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp,
 int flags)
 {
-rsclp->flags &= ~flags;
+WRITE_ONCE(rsclp->flags, rsclp->flags & ~flags);
 }

 static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp,
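The hunk above turns plain read-modify-writes of ->flags into WRITE_ONCE() stores so that KCSAN treats concurrent lockless readers of the field as intentional. The general pattern, sketched with a hypothetical structure that is not part of this patch:

struct widget {
	unsigned long state;	/* also read locklessly by other CPUs */
};

static void widget_set_state_bit(struct widget *w, unsigned long bit)
{
	/* Marked store: pairs with READ_ONCE() in lockless readers and avoids store tearing. */
	WRITE_ONCE(w->state, w->state | bit);
}

static bool widget_state_bit_set(struct widget *w, unsigned long bit)
{
	return !!(READ_ONCE(w->state) & bit);	/* marked lockless load */
}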
@@ -284,7 +284,7 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

-static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
+static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */

 /*
 * Allocate an element from the rcu_tortures pool.
@@ -387,7 +387,7 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 * period, and we want a long delay occasionally to trigger
 * force_quiescent_state. */

-if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
+if (!atomic_read(&rcu_fwd_cb_nodelay) &&
 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
 started = cur_ops->get_gp_seq();
 ts = rcu_trace_clock_local();
|
|||||||
.call = srcu_torture_call,
|
.call = srcu_torture_call,
|
||||||
.cb_barrier = srcu_torture_barrier,
|
.cb_barrier = srcu_torture_barrier,
|
||||||
.stats = srcu_torture_stats,
|
.stats = srcu_torture_stats,
|
||||||
|
.cbflood_max = 50000,
|
||||||
.irq_capable = 1,
|
.irq_capable = 1,
|
||||||
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
|
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
|
||||||
.name = "srcu"
|
.name = "srcu"
|
||||||
@@ -708,6 +709,7 @@ static struct rcu_torture_ops srcud_ops = {
|
|||||||
.call = srcu_torture_call,
|
.call = srcu_torture_call,
|
||||||
.cb_barrier = srcu_torture_barrier,
|
.cb_barrier = srcu_torture_barrier,
|
||||||
.stats = srcu_torture_stats,
|
.stats = srcu_torture_stats,
|
||||||
|
.cbflood_max = 50000,
|
||||||
.irq_capable = 1,
|
.irq_capable = 1,
|
||||||
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
|
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
|
||||||
.name = "srcud"
|
.name = "srcud"
|
||||||
@@ -997,7 +999,7 @@ static int rcu_torture_boost(void *arg)
|
|||||||
goto checkwait;
|
goto checkwait;
|
||||||
|
|
||||||
/* Wait for the next test interval. */
|
/* Wait for the next test interval. */
|
||||||
oldstarttime = boost_starttime;
|
oldstarttime = READ_ONCE(boost_starttime);
|
||||||
while (time_before(jiffies, oldstarttime)) {
|
while (time_before(jiffies, oldstarttime)) {
|
||||||
schedule_timeout_interruptible(oldstarttime - jiffies);
|
schedule_timeout_interruptible(oldstarttime - jiffies);
|
||||||
if (stutter_wait("rcu_torture_boost"))
|
if (stutter_wait("rcu_torture_boost"))
|
||||||
@@ -1041,10 +1043,11 @@ static int rcu_torture_boost(void *arg)
|
|||||||
* interval. Besides, we are running at RT priority,
|
* interval. Besides, we are running at RT priority,
|
||||||
* so delays should be relatively rare.
|
* so delays should be relatively rare.
|
||||||
*/
|
*/
|
||||||
while (oldstarttime == boost_starttime && !kthread_should_stop()) {
|
while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
|
||||||
if (mutex_trylock(&boost_mutex)) {
|
if (mutex_trylock(&boost_mutex)) {
|
||||||
if (oldstarttime == boost_starttime) {
|
if (oldstarttime == boost_starttime) {
|
||||||
boost_starttime = jiffies + test_boost_interval * HZ;
|
WRITE_ONCE(boost_starttime,
|
||||||
|
jiffies + test_boost_interval * HZ);
|
||||||
n_rcu_torture_boosts++;
|
n_rcu_torture_boosts++;
|
||||||
}
|
}
|
||||||
mutex_unlock(&boost_mutex);
|
mutex_unlock(&boost_mutex);
|
||||||
@@ -1276,7 +1279,7 @@ rcu_torture_writer(void *arg)
|
|||||||
boot_ended = rcu_inkernel_boot_has_ended();
|
boot_ended = rcu_inkernel_boot_has_ended();
|
||||||
stutter_waited = stutter_wait("rcu_torture_writer");
|
stutter_waited = stutter_wait("rcu_torture_writer");
|
||||||
if (stutter_waited &&
|
if (stutter_waited &&
|
||||||
!READ_ONCE(rcu_fwd_cb_nodelay) &&
|
!atomic_read(&rcu_fwd_cb_nodelay) &&
|
||||||
!cur_ops->slow_gps &&
|
!cur_ops->slow_gps &&
|
||||||
!torture_must_stop() &&
|
!torture_must_stop() &&
|
||||||
boot_ended)
|
boot_ended)
|
||||||
@@ -2180,7 +2183,6 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
|
|||||||
for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
|
for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
|
||||||
if (rfp->n_launders_hist[i].n_launders > 0)
|
if (rfp->n_launders_hist[i].n_launders > 0)
|
||||||
break;
|
break;
|
||||||
mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
|
|
||||||
pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
|
pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
|
||||||
__func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
|
__func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
|
||||||
gps_old = rfp->rcu_launder_gp_seq_start;
|
gps_old = rfp->rcu_launder_gp_seq_start;
|
||||||
@@ -2193,7 +2195,6 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
|
|||||||
gps_old = gps;
|
gps_old = gps;
|
||||||
}
|
}
|
||||||
pr_cont("\n");
|
pr_cont("\n");
|
||||||
mutex_unlock(&rcu_fwd_mutex);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Callback function for continuous-flood RCU callbacks. */
|
/* Callback function for continuous-flood RCU callbacks. */
|
||||||
@@ -2281,6 +2282,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
|
|||||||
unsigned long stopat;
|
unsigned long stopat;
|
||||||
static DEFINE_TORTURE_RANDOM(trs);
|
static DEFINE_TORTURE_RANDOM(trs);
|
||||||
|
|
||||||
|
pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
|
||||||
if (!cur_ops->sync)
|
if (!cur_ops->sync)
|
||||||
return; // Cannot do need_resched() forward progress testing without ->sync.
|
return; // Cannot do need_resched() forward progress testing without ->sync.
|
||||||
if (cur_ops->call && cur_ops->cb_barrier) {
|
if (cur_ops->call && cur_ops->cb_barrier) {
|
||||||
@@ -2289,7 +2291,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Tight loop containing cond_resched(). */
|
/* Tight loop containing cond_resched(). */
|
||||||
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
|
atomic_inc(&rcu_fwd_cb_nodelay);
|
||||||
cur_ops->sync(); /* Later readers see above write. */
|
cur_ops->sync(); /* Later readers see above write. */
|
||||||
if (selfpropcb) {
|
if (selfpropcb) {
|
||||||
WRITE_ONCE(fcs.stop, 0);
|
WRITE_ONCE(fcs.stop, 0);
|
||||||
@@ -2325,6 +2327,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
|
|||||||
if (selfpropcb) {
|
if (selfpropcb) {
|
||||||
WRITE_ONCE(fcs.stop, 1);
|
WRITE_ONCE(fcs.stop, 1);
|
||||||
cur_ops->sync(); /* Wait for running CB to complete. */
|
cur_ops->sync(); /* Wait for running CB to complete. */
|
||||||
|
pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
|
||||||
cur_ops->cb_barrier(); /* Wait for queued callbacks. */
|
cur_ops->cb_barrier(); /* Wait for queued callbacks. */
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2333,7 +2336,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
|
|||||||
destroy_rcu_head_on_stack(&fcs.rh);
|
destroy_rcu_head_on_stack(&fcs.rh);
|
||||||
}
|
}
|
||||||
schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
|
schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
|
||||||
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
|
atomic_dec(&rcu_fwd_cb_nodelay);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Carry out call_rcu() forward-progress testing. */
|
/* Carry out call_rcu() forward-progress testing. */
|
||||||
@@ -2353,13 +2356,14 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
|
|||||||
unsigned long stopat;
|
unsigned long stopat;
|
||||||
unsigned long stoppedat;
|
unsigned long stoppedat;
|
||||||
|
|
||||||
|
pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
|
||||||
if (READ_ONCE(rcu_fwd_emergency_stop))
|
if (READ_ONCE(rcu_fwd_emergency_stop))
|
||||||
return; /* Get out of the way quickly, no GP wait! */
|
return; /* Get out of the way quickly, no GP wait! */
|
||||||
if (!cur_ops->call)
|
if (!cur_ops->call)
|
||||||
return; /* Can't do call_rcu() fwd prog without ->call. */
|
return; /* Can't do call_rcu() fwd prog without ->call. */
|
||||||
|
|
||||||
/* Loop continuously posting RCU callbacks. */
|
/* Loop continuously posting RCU callbacks. */
|
||||||
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
|
atomic_inc(&rcu_fwd_cb_nodelay);
|
||||||
cur_ops->sync(); /* Later readers see above write. */
|
cur_ops->sync(); /* Later readers see above write. */
|
||||||
WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
|
WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
|
||||||
stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
|
stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
|
||||||
@@ -2414,6 +2418,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
|
|||||||
n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
|
n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
|
||||||
cver = READ_ONCE(rcu_torture_current_version) - cver;
|
cver = READ_ONCE(rcu_torture_current_version) - cver;
|
||||||
gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
|
gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
|
||||||
|
pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
|
||||||
cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
|
cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
|
||||||
(void)rcu_torture_fwd_prog_cbfree(rfp);
|
(void)rcu_torture_fwd_prog_cbfree(rfp);
|
||||||
|
|
||||||
@@ -2427,11 +2432,13 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
|
|||||||
n_launders, n_launders_sa,
|
n_launders, n_launders_sa,
|
||||||
n_max_gps, n_max_cbs, cver, gps);
|
n_max_gps, n_max_cbs, cver, gps);
|
||||||
atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
|
atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
|
||||||
|
mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
|
||||||
rcu_torture_fwd_cb_hist(rfp);
|
rcu_torture_fwd_cb_hist(rfp);
|
||||||
|
mutex_unlock(&rcu_fwd_mutex);
|
||||||
}
|
}
|
||||||
schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
|
schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
|
||||||
tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
|
tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
|
||||||
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
|
atomic_dec(&rcu_fwd_cb_nodelay);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -2511,7 +2518,7 @@ static int rcu_torture_fwd_prog(void *args)
|
|||||||
firsttime = false;
|
firsttime = false;
|
||||||
WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
|
WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
|
||||||
} else {
|
} else {
|
||||||
while (READ_ONCE(rcu_fwd_seq) == oldseq)
|
while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
|
||||||
schedule_timeout_interruptible(1);
|
schedule_timeout_interruptible(1);
|
||||||
oldseq = READ_ONCE(rcu_fwd_seq);
|
oldseq = READ_ONCE(rcu_fwd_seq);
|
||||||
}
|
}
|
||||||
@@ -2905,8 +2912,10 @@ rcu_torture_cleanup(void)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (torture_cleanup_begin()) {
|
if (torture_cleanup_begin()) {
|
||||||
if (cur_ops->cb_barrier != NULL)
|
if (cur_ops->cb_barrier != NULL) {
|
||||||
|
pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
|
||||||
cur_ops->cb_barrier();
|
cur_ops->cb_barrier();
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (!cur_ops) {
|
if (!cur_ops) {
|
||||||
@@ -2961,8 +2970,10 @@ rcu_torture_cleanup(void)
|
|||||||
* Wait for all RCU callbacks to fire, then do torture-type-specific
|
* Wait for all RCU callbacks to fire, then do torture-type-specific
|
||||||
* cleanup operations.
|
* cleanup operations.
|
||||||
*/
|
*/
|
||||||
if (cur_ops->cb_barrier != NULL)
|
if (cur_ops->cb_barrier != NULL) {
|
||||||
|
pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
|
||||||
cur_ops->cb_barrier();
|
cur_ops->cb_barrier();
|
||||||
|
}
|
||||||
if (cur_ops->cleanup != NULL)
|
if (cur_ops->cleanup != NULL)
|
||||||
cur_ops->cleanup();
|
cur_ops->cleanup();
|
||||||
|
|
||||||
|
|||||||
@@ -123,7 +123,7 @@ static struct rcu_tasks rt_name = \
|
|||||||
.call_func = call, \
|
.call_func = call, \
|
||||||
.rtpcpu = &rt_name ## __percpu, \
|
.rtpcpu = &rt_name ## __percpu, \
|
||||||
.name = n, \
|
.name = n, \
|
||||||
.percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS) + 1, \
|
.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
|
||||||
.percpu_enqueue_lim = 1, \
|
.percpu_enqueue_lim = 1, \
|
||||||
.percpu_dequeue_lim = 1, \
|
.percpu_dequeue_lim = 1, \
|
||||||
.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
|
.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
|
||||||
@@ -302,7 +302,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
|
|||||||
if (unlikely(needadjust)) {
|
if (unlikely(needadjust)) {
|
||||||
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
|
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
|
||||||
if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
|
if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
|
||||||
WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
|
WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
|
||||||
WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
|
WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
|
||||||
smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
|
smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
|
||||||
pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
|
pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
|
||||||
@@ -417,7 +417,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
|
|||||||
if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
|
if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
|
||||||
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
|
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
|
||||||
if (rtp->percpu_enqueue_lim > 1) {
|
if (rtp->percpu_enqueue_lim > 1) {
|
||||||
WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
|
WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
|
||||||
smp_store_release(&rtp->percpu_enqueue_lim, 1);
|
smp_store_release(&rtp->percpu_enqueue_lim, 1);
|
||||||
rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
|
rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
|
||||||
pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
|
pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
|
||||||
|
|||||||
@@ -87,11 +87,12 @@ static struct rcu_state rcu_state = {
|
|||||||
.gp_state = RCU_GP_IDLE,
|
.gp_state = RCU_GP_IDLE,
|
||||||
.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
|
.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
|
||||||
.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
|
.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
|
||||||
|
.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
|
||||||
.name = RCU_NAME,
|
.name = RCU_NAME,
|
||||||
.abbr = RCU_ABBR,
|
.abbr = RCU_ABBR,
|
||||||
.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
|
.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
|
||||||
.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
|
.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
|
||||||
.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
|
.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Dump rcu_node combining tree at boot to verify correct setup. */
|
/* Dump rcu_node combining tree at boot to verify correct setup. */
|
||||||
@@ -153,7 +154,7 @@ static void sync_sched_exp_online_cleanup(int cpu);
|
|||||||
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
|
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
|
||||||
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
|
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
|
||||||
|
|
||||||
/* rcuc/rcub kthread realtime priority */
|
/* rcuc/rcub/rcuop kthread realtime priority */
|
||||||
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
|
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
|
||||||
module_param(kthread_prio, int, 0444);
|
module_param(kthread_prio, int, 0444);
|
||||||
|
|
||||||
@@ -221,6 +222,16 @@ static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
|
|||||||
return READ_ONCE(rnp->qsmaskinitnext);
|
return READ_ONCE(rnp->qsmaskinitnext);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Is the CPU corresponding to the specified rcu_data structure online
|
||||||
|
* from RCU's perspective? This perspective is given by that structure's
|
||||||
|
* ->qsmaskinitnext field rather than by the global cpu_online_mask.
|
||||||
|
*/
|
||||||
|
static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
|
||||||
|
{
|
||||||
|
return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return true if an RCU grace period is in progress. The READ_ONCE()s
|
* Return true if an RCU grace period is in progress. The READ_ONCE()s
|
||||||
* permit this function to be invoked without holding the root rcu_node
|
* permit this function to be invoked without holding the root rcu_node
|
||||||
@@ -1166,15 +1177,20 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
|
|||||||
bool rcu_lockdep_current_cpu_online(void)
|
bool rcu_lockdep_current_cpu_online(void)
|
||||||
{
|
{
|
||||||
struct rcu_data *rdp;
|
struct rcu_data *rdp;
|
||||||
struct rcu_node *rnp;
|
|
||||||
bool ret = false;
|
bool ret = false;
|
||||||
|
|
||||||
if (in_nmi() || !rcu_scheduler_fully_active)
|
if (in_nmi() || !rcu_scheduler_fully_active)
|
||||||
return true;
|
return true;
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
rdp = this_cpu_ptr(&rcu_data);
|
rdp = this_cpu_ptr(&rcu_data);
|
||||||
rnp = rdp->mynode;
|
/*
|
||||||
if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
|
* Strictly, we care here about the case where the current CPU is
|
||||||
|
* in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
|
||||||
|
* not being up to date. So arch_spin_is_locked() might have a
|
||||||
|
* false positive if it's held by some *other* CPU, but that's
|
||||||
|
* OK because that just means a false *negative* on the warning.
|
||||||
|
*/
|
||||||
|
if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
|
||||||
ret = true;
|
ret = true;
|
||||||
preempt_enable_notrace();
|
preempt_enable_notrace();
|
||||||
return ret;
|
return ret;
|
||||||
@@ -1259,8 +1275,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
|
|||||||
* For more detail, please refer to the "Hotplug CPU" section
|
* For more detail, please refer to the "Hotplug CPU" section
|
||||||
* of RCU's Requirements documentation.
|
* of RCU's Requirements documentation.
|
||||||
*/
|
*/
|
||||||
if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
|
if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
|
||||||
bool onl;
|
|
||||||
struct rcu_node *rnp1;
|
struct rcu_node *rnp1;
|
||||||
|
|
||||||
pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
|
pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
|
||||||
@@ -1269,9 +1284,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
|
|||||||
for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
|
for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
|
||||||
pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
|
pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
|
||||||
__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
|
__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
|
||||||
onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
|
|
||||||
pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
|
pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
|
||||||
__func__, rdp->cpu, ".o"[onl],
|
__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
|
||||||
(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
|
(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
|
||||||
(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
|
(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
|
||||||
return 1; /* Break things loose after complaining. */
|
return 1; /* Break things loose after complaining. */
|
||||||
@@ -1738,7 +1752,6 @@ static void rcu_strict_gp_boundary(void *unused)
|
|||||||
*/
|
*/
|
||||||
static noinline_for_stack bool rcu_gp_init(void)
|
static noinline_for_stack bool rcu_gp_init(void)
|
||||||
{
|
{
|
||||||
unsigned long firstseq;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
unsigned long oldmask;
|
unsigned long oldmask;
|
||||||
unsigned long mask;
|
unsigned long mask;
|
||||||
@@ -1781,22 +1794,17 @@ static noinline_for_stack bool rcu_gp_init(void)
|
|||||||
* of RCU's Requirements documentation.
|
* of RCU's Requirements documentation.
|
||||||
*/
|
*/
|
||||||
WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
|
WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
|
||||||
|
/* Exclude CPU hotplug operations. */
|
||||||
rcu_for_each_leaf_node(rnp) {
|
rcu_for_each_leaf_node(rnp) {
|
||||||
// Wait for CPU-hotplug operations that might have
|
local_irq_save(flags);
|
||||||
// started before this grace period did.
|
arch_spin_lock(&rcu_state.ofl_lock);
|
||||||
smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
|
raw_spin_lock_rcu_node(rnp);
|
||||||
firstseq = READ_ONCE(rnp->ofl_seq);
|
|
||||||
if (firstseq & 0x1)
|
|
||||||
while (firstseq == READ_ONCE(rnp->ofl_seq))
|
|
||||||
schedule_timeout_idle(1); // Can't wake unless RCU is watching.
|
|
||||||
smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
|
|
||||||
raw_spin_lock(&rcu_state.ofl_lock);
|
|
||||||
raw_spin_lock_irq_rcu_node(rnp);
|
|
||||||
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
|
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
|
||||||
!rnp->wait_blkd_tasks) {
|
!rnp->wait_blkd_tasks) {
|
||||||
/* Nothing to do on this leaf rcu_node structure. */
|
/* Nothing to do on this leaf rcu_node structure. */
|
||||||
raw_spin_unlock_irq_rcu_node(rnp);
|
raw_spin_unlock_rcu_node(rnp);
|
||||||
raw_spin_unlock(&rcu_state.ofl_lock);
|
arch_spin_unlock(&rcu_state.ofl_lock);
|
||||||
|
local_irq_restore(flags);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1831,8 +1839,9 @@ static noinline_for_stack bool rcu_gp_init(void)
|
|||||||
rcu_cleanup_dead_rnp(rnp);
|
rcu_cleanup_dead_rnp(rnp);
|
||||||
}
|
}
|
||||||
|
|
||||||
raw_spin_unlock_irq_rcu_node(rnp);
|
raw_spin_unlock_rcu_node(rnp);
|
||||||
raw_spin_unlock(&rcu_state.ofl_lock);
|
arch_spin_unlock(&rcu_state.ofl_lock);
|
||||||
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
|
rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
|
||||||
|
|
||||||
@@ -2849,10 +2858,12 @@ static void rcu_cpu_kthread(unsigned int cpu)
|
|||||||
{
|
{
|
||||||
unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
|
unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
|
||||||
char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
|
char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
|
||||||
|
unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
|
||||||
int spincnt;
|
int spincnt;
|
||||||
|
|
||||||
trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
|
trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
|
||||||
for (spincnt = 0; spincnt < 10; spincnt++) {
|
for (spincnt = 0; spincnt < 10; spincnt++) {
|
||||||
|
WRITE_ONCE(*j, jiffies);
|
||||||
local_bh_disable();
|
local_bh_disable();
|
||||||
*statusp = RCU_KTHREAD_RUNNING;
|
*statusp = RCU_KTHREAD_RUNNING;
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
@@ -2873,6 +2884,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
|
|||||||
schedule_timeout_idle(2);
|
schedule_timeout_idle(2);
|
||||||
trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
|
trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
|
||||||
*statusp = RCU_KTHREAD_WAITING;
|
*statusp = RCU_KTHREAD_WAITING;
|
||||||
|
WRITE_ONCE(*j, jiffies);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct smp_hotplug_thread rcu_cpu_thread_spec = {
|
static struct smp_hotplug_thread rcu_cpu_thread_spec = {
|
||||||
@@ -2893,7 +2905,7 @@ static int __init rcu_spawn_core_kthreads(void)
|
|||||||
|
|
||||||
for_each_possible_cpu(cpu)
|
for_each_possible_cpu(cpu)
|
||||||
per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
|
per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
|
||||||
if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
|
if (use_softirq)
|
||||||
return 0;
|
return 0;
|
||||||
WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
|
WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
|
||||||
"%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
|
"%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
|
||||||
@@ -2994,72 +3006,6 @@ static void check_cb_ovld(struct rcu_data *rdp)
|
|||||||
raw_spin_unlock_rcu_node(rnp);
|
raw_spin_unlock_rcu_node(rnp);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Helper function for call_rcu() and friends. */
|
|
||||||
static void
|
|
||||||
__call_rcu(struct rcu_head *head, rcu_callback_t func)
|
|
||||||
{
|
|
||||||
static atomic_t doublefrees;
|
|
||||||
unsigned long flags;
|
|
||||||
struct rcu_data *rdp;
|
|
||||||
bool was_alldone;
|
|
||||||
|
|
||||||
/* Misaligned rcu_head! */
|
|
||||||
WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
|
|
||||||
|
|
||||||
if (debug_rcu_head_queue(head)) {
|
|
||||||
/*
|
|
||||||
* Probable double call_rcu(), so leak the callback.
|
|
||||||
* Use rcu:rcu_callback trace event to find the previous
|
|
||||||
* time callback was passed to __call_rcu().
|
|
||||||
*/
|
|
||||||
if (atomic_inc_return(&doublefrees) < 4) {
|
|
||||||
pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
|
|
||||||
mem_dump_obj(head);
|
|
||||||
}
|
|
||||||
WRITE_ONCE(head->func, rcu_leak_callback);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
head->func = func;
|
|
||||||
head->next = NULL;
|
|
||||||
local_irq_save(flags);
|
|
||||||
kasan_record_aux_stack_noalloc(head);
|
|
||||||
rdp = this_cpu_ptr(&rcu_data);
|
|
||||||
|
|
||||||
/* Add the callback to our list. */
|
|
||||||
if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
|
|
||||||
// This can trigger due to call_rcu() from offline CPU:
|
|
||||||
WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
|
|
||||||
WARN_ON_ONCE(!rcu_is_watching());
|
|
||||||
// Very early boot, before rcu_init(). Initialize if needed
|
|
||||||
// and then drop through to queue the callback.
|
|
||||||
if (rcu_segcblist_empty(&rdp->cblist))
|
|
||||||
rcu_segcblist_init(&rdp->cblist);
|
|
||||||
}
|
|
||||||
|
|
||||||
check_cb_ovld(rdp);
|
|
||||||
if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
|
|
||||||
return; // Enqueued onto ->nocb_bypass, so just leave.
|
|
||||||
// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
|
|
||||||
rcu_segcblist_enqueue(&rdp->cblist, head);
|
|
||||||
if (__is_kvfree_rcu_offset((unsigned long)func))
|
|
||||||
trace_rcu_kvfree_callback(rcu_state.name, head,
|
|
||||||
(unsigned long)func,
|
|
||||||
rcu_segcblist_n_cbs(&rdp->cblist));
|
|
||||||
else
|
|
||||||
trace_rcu_callback(rcu_state.name, head,
|
|
||||||
rcu_segcblist_n_cbs(&rdp->cblist));
|
|
||||||
|
|
||||||
trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
|
|
||||||
|
|
||||||
/* Go handle any RCU core processing required. */
|
|
||||||
if (unlikely(rcu_rdp_is_offloaded(rdp))) {
|
|
||||||
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
|
|
||||||
} else {
|
|
||||||
__call_rcu_core(rdp, head, flags);
|
|
||||||
local_irq_restore(flags);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* call_rcu() - Queue an RCU callback for invocation after a grace period.
|
* call_rcu() - Queue an RCU callback for invocation after a grace period.
|
||||||
* @head: structure to be used for queueing the RCU updates.
|
* @head: structure to be used for queueing the RCU updates.
|
||||||
@@ -3102,7 +3048,66 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
|
|||||||
*/
|
*/
|
||||||
void call_rcu(struct rcu_head *head, rcu_callback_t func)
|
void call_rcu(struct rcu_head *head, rcu_callback_t func)
|
||||||
{
|
{
|
||||||
__call_rcu(head, func);
|
static atomic_t doublefrees;
|
||||||
|
unsigned long flags;
|
||||||
|
struct rcu_data *rdp;
|
||||||
|
bool was_alldone;
|
||||||
|
|
||||||
|
/* Misaligned rcu_head! */
|
||||||
|
WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
|
||||||
|
|
||||||
|
if (debug_rcu_head_queue(head)) {
|
||||||
|
/*
|
||||||
|
* Probable double call_rcu(), so leak the callback.
|
||||||
|
* Use rcu:rcu_callback trace event to find the previous
|
||||||
|
* time callback was passed to call_rcu().
|
||||||
|
*/
|
||||||
|
if (atomic_inc_return(&doublefrees) < 4) {
|
||||||
|
pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
|
||||||
|
mem_dump_obj(head);
|
||||||
|
}
|
||||||
|
WRITE_ONCE(head->func, rcu_leak_callback);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
head->func = func;
|
||||||
|
head->next = NULL;
|
||||||
|
kasan_record_aux_stack_noalloc(head);
|
||||||
|
local_irq_save(flags);
|
||||||
|
rdp = this_cpu_ptr(&rcu_data);
|
||||||
|
|
||||||
|
/* Add the callback to our list. */
|
||||||
|
if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
|
||||||
|
// This can trigger due to call_rcu() from offline CPU:
|
||||||
|
WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
|
||||||
|
WARN_ON_ONCE(!rcu_is_watching());
|
||||||
|
// Very early boot, before rcu_init(). Initialize if needed
|
||||||
|
// and then drop through to queue the callback.
|
||||||
|
if (rcu_segcblist_empty(&rdp->cblist))
|
||||||
|
rcu_segcblist_init(&rdp->cblist);
|
||||||
|
}
|
||||||
|
|
||||||
|
check_cb_ovld(rdp);
|
||||||
|
if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
|
||||||
|
return; // Enqueued onto ->nocb_bypass, so just leave.
|
||||||
|
// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
|
||||||
|
rcu_segcblist_enqueue(&rdp->cblist, head);
|
||||||
|
if (__is_kvfree_rcu_offset((unsigned long)func))
|
||||||
|
trace_rcu_kvfree_callback(rcu_state.name, head,
|
||||||
|
(unsigned long)func,
|
||||||
|
rcu_segcblist_n_cbs(&rdp->cblist));
|
||||||
|
else
|
||||||
|
trace_rcu_callback(rcu_state.name, head,
|
||||||
|
rcu_segcblist_n_cbs(&rdp->cblist));
|
||||||
|
|
||||||
|
trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
|
||||||
|
|
||||||
|
/* Go handle any RCU core processing required. */
|
||||||
|
if (unlikely(rcu_rdp_is_offloaded(rdp))) {
|
||||||
|
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
|
||||||
|
} else {
|
||||||
|
__call_rcu_core(rdp, head, flags);
|
||||||
|
local_irq_restore(flags);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(call_rcu);
|
EXPORT_SYMBOL_GPL(call_rcu);
|
||||||
|
|
||||||
@@ -3983,13 +3988,16 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Called with preemption disabled, and from cross-cpu IRQ context.
|
* If needed, entrain an rcu_barrier() callback on rdp->cblist.
|
||||||
*/
|
*/
|
||||||
static void rcu_barrier_func(void *cpu_in)
|
static void rcu_barrier_entrain(struct rcu_data *rdp)
|
||||||
{
|
{
|
||||||
uintptr_t cpu = (uintptr_t)cpu_in;
|
unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
|
||||||
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
|
||||||
|
|
||||||
|
lockdep_assert_held(&rcu_state.barrier_lock);
|
||||||
|
if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
|
||||||
|
return;
|
||||||
rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
|
rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
|
||||||
rdp->barrier_head.func = rcu_barrier_callback;
|
rdp->barrier_head.func = rcu_barrier_callback;
|
||||||
debug_rcu_head_queue(&rdp->barrier_head);
|
debug_rcu_head_queue(&rdp->barrier_head);
|
||||||
@@ -3999,10 +4007,26 @@ static void rcu_barrier_func(void *cpu_in)
|
|||||||
atomic_inc(&rcu_state.barrier_cpu_count);
|
atomic_inc(&rcu_state.barrier_cpu_count);
|
||||||
} else {
|
} else {
|
||||||
debug_rcu_head_unqueue(&rdp->barrier_head);
|
debug_rcu_head_unqueue(&rdp->barrier_head);
|
||||||
rcu_barrier_trace(TPS("IRQNQ"), -1,
|
rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
|
||||||
rcu_state.barrier_sequence);
|
|
||||||
}
|
}
|
||||||
rcu_nocb_unlock(rdp);
|
rcu_nocb_unlock(rdp);
|
||||||
|
smp_store_release(&rdp->barrier_seq_snap, gseq);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Called with preemption disabled, and from cross-cpu IRQ context.
|
||||||
|
*/
|
||||||
|
static void rcu_barrier_handler(void *cpu_in)
|
||||||
|
{
|
||||||
|
uintptr_t cpu = (uintptr_t)cpu_in;
|
||||||
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
||||||
|
|
||||||
|
lockdep_assert_irqs_disabled();
|
||||||
|
WARN_ON_ONCE(cpu != rdp->cpu);
|
||||||
|
WARN_ON_ONCE(cpu != smp_processor_id());
|
||||||
|
raw_spin_lock(&rcu_state.barrier_lock);
|
||||||
|
rcu_barrier_entrain(rdp);
|
||||||
|
raw_spin_unlock(&rcu_state.barrier_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -4016,6 +4040,8 @@ static void rcu_barrier_func(void *cpu_in)
|
|||||||
void rcu_barrier(void)
|
void rcu_barrier(void)
|
||||||
{
|
{
|
||||||
uintptr_t cpu;
|
uintptr_t cpu;
|
||||||
|
unsigned long flags;
|
||||||
|
unsigned long gseq;
|
||||||
struct rcu_data *rdp;
|
struct rcu_data *rdp;
|
||||||
unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
|
unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
|
||||||
|
|
||||||
@@ -4026,15 +4052,16 @@ void rcu_barrier(void)
|
|||||||
|
|
||||||
/* Did someone else do our work for us? */
|
/* Did someone else do our work for us? */
|
||||||
if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
|
if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
|
||||||
rcu_barrier_trace(TPS("EarlyExit"), -1,
|
rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
|
||||||
rcu_state.barrier_sequence);
|
|
||||||
smp_mb(); /* caller's subsequent code after above check. */
|
smp_mb(); /* caller's subsequent code after above check. */
|
||||||
mutex_unlock(&rcu_state.barrier_mutex);
|
mutex_unlock(&rcu_state.barrier_mutex);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Mark the start of the barrier operation. */
|
/* Mark the start of the barrier operation. */
|
||||||
|
raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
|
||||||
rcu_seq_start(&rcu_state.barrier_sequence);
|
rcu_seq_start(&rcu_state.barrier_sequence);
|
||||||
|
gseq = rcu_state.barrier_sequence;
|
||||||
rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
|
rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -4046,7 +4073,7 @@ void rcu_barrier(void)
|
|||||||
*/
|
*/
|
||||||
init_completion(&rcu_state.barrier_completion);
|
init_completion(&rcu_state.barrier_completion);
|
||||||
atomic_set(&rcu_state.barrier_cpu_count, 2);
|
atomic_set(&rcu_state.barrier_cpu_count, 2);
|
||||||
cpus_read_lock();
|
raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Force each CPU with callbacks to register a new callback.
|
* Force each CPU with callbacks to register a new callback.
|
||||||
@@ -4055,29 +4082,31 @@ void rcu_barrier(void)
|
|||||||
*/
|
*/
|
||||||
for_each_possible_cpu(cpu) {
|
for_each_possible_cpu(cpu) {
|
||||||
rdp = per_cpu_ptr(&rcu_data, cpu);
|
rdp = per_cpu_ptr(&rcu_data, cpu);
|
||||||
if (cpu_is_offline(cpu) &&
|
retry:
|
||||||
!rcu_rdp_is_offloaded(rdp))
|
if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
|
||||||
|
continue;
|
||||||
|
raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
|
||||||
|
if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
|
||||||
|
WRITE_ONCE(rdp->barrier_seq_snap, gseq);
|
||||||
|
raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
|
||||||
|
rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
|
||||||
continue;
|
continue;
|
||||||
if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
|
|
||||||
rcu_barrier_trace(TPS("OnlineQ"), cpu,
|
|
||||||
rcu_state.barrier_sequence);
|
|
||||||
smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
|
|
||||||
} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
|
|
||||||
cpu_is_offline(cpu)) {
|
|
||||||
rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
|
|
||||||
rcu_state.barrier_sequence);
|
|
||||||
local_irq_disable();
|
|
||||||
rcu_barrier_func((void *)cpu);
|
|
||||||
local_irq_enable();
|
|
||||||
} else if (cpu_is_offline(cpu)) {
|
|
||||||
rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
|
|
||||||
rcu_state.barrier_sequence);
|
|
||||||
} else {
|
|
||||||
rcu_barrier_trace(TPS("OnlineNQ"), cpu,
|
|
||||||
rcu_state.barrier_sequence);
|
|
||||||
}
|
}
|
||||||
|
if (!rcu_rdp_cpu_online(rdp)) {
|
||||||
|
rcu_barrier_entrain(rdp);
|
||||||
|
WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
|
||||||
|
raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
|
||||||
|
rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
|
||||||
|
if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
|
||||||
|
schedule_timeout_uninterruptible(1);
|
||||||
|
goto retry;
|
||||||
|
}
|
||||||
|
WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
|
||||||
|
rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
|
||||||
}
|
}
|
||||||
cpus_read_unlock();
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Now that we have an rcu_barrier_callback() callback on each
|
* Now that we have an rcu_barrier_callback() callback on each
|
||||||
@@ -4092,6 +4121,12 @@ void rcu_barrier(void)
|
|||||||
/* Mark the end of the barrier operation. */
|
/* Mark the end of the barrier operation. */
|
||||||
rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
|
rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
|
||||||
rcu_seq_end(&rcu_state.barrier_sequence);
|
rcu_seq_end(&rcu_state.barrier_sequence);
|
||||||
|
gseq = rcu_state.barrier_sequence;
|
||||||
|
for_each_possible_cpu(cpu) {
|
||||||
|
rdp = per_cpu_ptr(&rcu_data, cpu);
|
||||||
|
|
||||||
|
WRITE_ONCE(rdp->barrier_seq_snap, gseq);
|
||||||
|
}
|
||||||
|
|
||||||
/* Other rcu_barrier() invocations can now safely proceed. */
|
/* Other rcu_barrier() invocations can now safely proceed. */
|
||||||
mutex_unlock(&rcu_state.barrier_mutex);
|
mutex_unlock(&rcu_state.barrier_mutex);
|
||||||
@@ -4139,6 +4174,7 @@ rcu_boot_init_percpu_data(int cpu)
|
|||||||
INIT_WORK(&rdp->strict_work, strict_work_handler);
|
INIT_WORK(&rdp->strict_work, strict_work_handler);
|
||||||
WARN_ON_ONCE(rdp->dynticks_nesting != 1);
|
WARN_ON_ONCE(rdp->dynticks_nesting != 1);
|
||||||
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
|
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
|
||||||
|
rdp->barrier_seq_snap = rcu_state.barrier_sequence;
|
||||||
rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
|
rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
|
||||||
rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
|
rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
|
||||||
rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
|
rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
|
||||||
@@ -4286,12 +4322,13 @@ void rcu_cpu_starting(unsigned int cpu)
|
|||||||
|
|
||||||
rnp = rdp->mynode;
|
rnp = rdp->mynode;
|
||||||
mask = rdp->grpmask;
|
mask = rdp->grpmask;
|
||||||
WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
|
local_irq_save(flags);
|
||||||
WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
|
arch_spin_lock(&rcu_state.ofl_lock);
|
||||||
rcu_dynticks_eqs_online();
|
rcu_dynticks_eqs_online();
|
||||||
smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
|
raw_spin_lock(&rcu_state.barrier_lock);
|
||||||
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
raw_spin_lock_rcu_node(rnp);
|
||||||
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
|
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
|
||||||
|
raw_spin_unlock(&rcu_state.barrier_lock);
|
||||||
newcpu = !(rnp->expmaskinitnext & mask);
|
newcpu = !(rnp->expmaskinitnext & mask);
|
||||||
rnp->expmaskinitnext |= mask;
|
rnp->expmaskinitnext |= mask;
|
||||||
/* Allow lockless access for expedited grace periods. */
|
/* Allow lockless access for expedited grace periods. */
|
||||||
@@ -4303,15 +4340,18 @@ void rcu_cpu_starting(unsigned int cpu)
|
|||||||
|
|
||||||
/* An incoming CPU should never be blocking a grace period. */
|
/* An incoming CPU should never be blocking a grace period. */
|
||||||
if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
|
if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
|
||||||
|
/* rcu_report_qs_rnp() *really* wants some flags to restore */
|
||||||
|
unsigned long flags2;
|
||||||
|
|
||||||
|
local_irq_save(flags2);
|
||||||
rcu_disable_urgency_upon_qs(rdp);
|
rcu_disable_urgency_upon_qs(rdp);
|
||||||
/* Report QS -after- changing ->qsmaskinitnext! */
|
/* Report QS -after- changing ->qsmaskinitnext! */
|
||||||
rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
|
rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
|
||||||
} else {
|
} else {
|
||||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
raw_spin_unlock_rcu_node(rnp);
|
||||||
}
|
}
|
||||||
smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
|
arch_spin_unlock(&rcu_state.ofl_lock);
|
||||||
WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
|
local_irq_restore(flags);
|
||||||
WARN_ON_ONCE(rnp->ofl_seq & 0x1);
|
|
||||||
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
|
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -4325,7 +4365,7 @@ void rcu_cpu_starting(unsigned int cpu)
|
|||||||
*/
|
*/
|
||||||
void rcu_report_dead(unsigned int cpu)
|
void rcu_report_dead(unsigned int cpu)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags, seq_flags;
|
||||||
unsigned long mask;
|
unsigned long mask;
|
||||||
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
||||||
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
|
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
|
||||||
@@ -4339,10 +4379,8 @@ void rcu_report_dead(unsigned int cpu)
|
|||||||
|
|
||||||
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
|
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
|
||||||
mask = rdp->grpmask;
|
mask = rdp->grpmask;
|
||||||
WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
|
local_irq_save(seq_flags);
|
||||||
WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
|
arch_spin_lock(&rcu_state.ofl_lock);
|
||||||
smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
|
|
||||||
raw_spin_lock(&rcu_state.ofl_lock);
|
|
||||||
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
|
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
|
||||||
rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
|
rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
|
||||||
rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
|
rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
|
||||||
@@ -4353,10 +4391,8 @@ void rcu_report_dead(unsigned int cpu)
|
|||||||
}
|
}
|
||||||
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
|
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
|
||||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||||
raw_spin_unlock(&rcu_state.ofl_lock);
|
arch_spin_unlock(&rcu_state.ofl_lock);
|
||||||
smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
|
local_irq_restore(seq_flags);
|
||||||
WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
|
|
||||||
WARN_ON_ONCE(rnp->ofl_seq & 0x1);
|
|
||||||
|
|
||||||
rdp->cpu_started = false;
|
rdp->cpu_started = false;
|
||||||
}
|
}
|
||||||
@@ -4379,7 +4415,9 @@ void rcutree_migrate_callbacks(int cpu)
|
|||||||
rcu_segcblist_empty(&rdp->cblist))
|
rcu_segcblist_empty(&rdp->cblist))
|
||||||
return; /* No callbacks to migrate. */
|
return; /* No callbacks to migrate. */
|
||||||
|
|
||||||
local_irq_save(flags);
|
raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
|
||||||
|
WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
|
||||||
|
rcu_barrier_entrain(rdp);
|
||||||
my_rdp = this_cpu_ptr(&rcu_data);
|
my_rdp = this_cpu_ptr(&rcu_data);
|
||||||
my_rnp = my_rdp->mynode;
|
my_rnp = my_rdp->mynode;
|
||||||
rcu_nocb_lock(my_rdp); /* irqs already disabled. */
|
rcu_nocb_lock(my_rdp); /* irqs already disabled. */
|
||||||
@@ -4389,10 +4427,10 @@ void rcutree_migrate_callbacks(int cpu)
|
|||||||
needwake = rcu_advance_cbs(my_rnp, rdp) ||
|
needwake = rcu_advance_cbs(my_rnp, rdp) ||
|
||||||
rcu_advance_cbs(my_rnp, my_rdp);
|
rcu_advance_cbs(my_rnp, my_rdp);
|
||||||
rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
|
rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
|
||||||
|
raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
|
||||||
needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
|
needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
|
||||||
rcu_segcblist_disable(&rdp->cblist);
|
rcu_segcblist_disable(&rdp->cblist);
|
||||||
WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
|
WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
|
||||||
!rcu_segcblist_n_cbs(&my_rdp->cblist));
|
|
||||||
if (rcu_rdp_is_offloaded(my_rdp)) {
|
if (rcu_rdp_is_offloaded(my_rdp)) {
|
||||||
raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
|
raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
|
||||||
__call_rcu_nocb_wake(my_rdp, true, flags);
|
__call_rcu_nocb_wake(my_rdp, true, flags);
|
||||||
@@ -4439,26 +4477,10 @@ static int rcu_pm_notify(struct notifier_block *self,
|
|||||||
static int __init rcu_spawn_gp_kthread(void)
|
static int __init rcu_spawn_gp_kthread(void)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int kthread_prio_in = kthread_prio;
|
|
||||||
struct rcu_node *rnp;
|
struct rcu_node *rnp;
|
||||||
struct sched_param sp;
|
struct sched_param sp;
|
||||||
struct task_struct *t;
|
struct task_struct *t;
|
||||||
|
|
||||||
/* Force priority into range. */
|
|
||||||
if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
|
|
||||||
&& IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
|
|
||||||
kthread_prio = 2;
|
|
||||||
else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
|
|
||||||
kthread_prio = 1;
|
|
||||||
else if (kthread_prio < 0)
|
|
||||||
kthread_prio = 0;
|
|
||||||
else if (kthread_prio > 99)
|
|
||||||
kthread_prio = 99;
|
|
||||||
|
|
||||||
if (kthread_prio != kthread_prio_in)
|
|
||||||
pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
|
|
||||||
kthread_prio, kthread_prio_in);
|
|
||||||
|
|
||||||
rcu_scheduler_fully_active = 1;
|
rcu_scheduler_fully_active = 1;
|
||||||
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
|
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
|
||||||
if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
|
if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
|
||||||
@@ -4569,6 +4591,7 @@ static void __init rcu_init_one(void)
|
|||||||
init_waitqueue_head(&rnp->exp_wq[2]);
|
init_waitqueue_head(&rnp->exp_wq[2]);
|
||||||
init_waitqueue_head(&rnp->exp_wq[3]);
|
init_waitqueue_head(&rnp->exp_wq[3]);
|
||||||
spin_lock_init(&rnp->exp_lock);
|
spin_lock_init(&rnp->exp_lock);
|
||||||
|
mutex_init(&rnp->boost_kthread_mutex);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -4583,6 +4606,28 @@ static void __init rcu_init_one(void)
 	}
 }
 
+/*
+ * Force priority from the kernel command-line into range.
+ */
+static void __init sanitize_kthread_prio(void)
+{
+	int kthread_prio_in = kthread_prio;
+
+	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
+	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
+		kthread_prio = 2;
+	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+		kthread_prio = 1;
+	else if (kthread_prio < 0)
+		kthread_prio = 0;
+	else if (kthread_prio > 99)
+		kthread_prio = 99;
+
+	if (kthread_prio != kthread_prio_in)
+		pr_alert("%s: Limited prio to %d from %d\n",
+			 __func__, kthread_prio, kthread_prio_in);
+}
+
 /*
  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
  * replace the definitions in tree.h because those are needed to size
@@ -4743,6 +4788,7 @@ void __init rcu_init(void)
 
 	kfree_rcu_batch_init();
 	rcu_bootup_announce();
+	sanitize_kthread_prio();
 	rcu_init_geometry();
 	rcu_init_one();
 	if (dump_tree)
@@ -56,8 +56,6 @@ struct rcu_node {
 					/*  Initialized from ->qsmaskinitnext at the */
 					/*  beginning of each grace period. */
 	unsigned long qsmaskinitnext;
-	unsigned long ofl_seq;		/* CPU-hotplug operation sequence count. */
-					/* Online CPUs for next grace period. */
 	unsigned long expmask;		/* CPUs or groups that need to check in */
 					/* to allow the current expedited GP */
 					/* to complete. */
@@ -110,6 +108,9 @@ struct rcu_node {
 					/* side effect, not as a lock. */
 	unsigned long boost_time;
 					/* When to start boosting (jiffies). */
+	struct mutex boost_kthread_mutex;
+					/* Exclusion for thread spawning and affinity */
+					/* manipulation. */
 	struct task_struct *boost_kthread_task;
 					/* kthread that takes care of priority */
 					/* boosting for this rcu_node structure. */
@@ -190,6 +191,7 @@ struct rcu_data {
 	bool rcu_forced_tick_exp;	/* ... provide QS to expedited GP. */
 
 	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
+	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
 	struct rcu_head barrier_head;
 	int exp_dynticks_snap;		/* Double-check need for IPI. */
 
@@ -203,6 +205,8 @@ struct rcu_data {
 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
 	struct timer_list nocb_timer;	/* Enforce finite deferral. */
 	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
+	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
+					    /* spawning */
 
 	/* The following fields are used by call_rcu, hence own cacheline. */
 	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
@@ -237,6 +241,7 @@ struct rcu_data {
 					/* rcuc per-CPU kthread or NULL. */
 	unsigned int rcu_cpu_kthread_status;
 	char rcu_cpu_has_work;
+	unsigned long rcuc_activity;
 
 	/* 7) Diagnostic data, including RCU CPU stall warnings. */
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
@@ -302,9 +307,8 @@ struct rcu_state {
 
 	/* The following fields are guarded by the root rcu_node's lock. */
 
-	u8	boost ____cacheline_internodealigned_in_smp;
-						/* Subject to priority boost. */
-	unsigned long gp_seq;			/* Grace-period sequence #. */
+	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
+						/* Grace-period sequence #. */
 	unsigned long gp_max;			/* Maximum GP duration in */
 						/* jiffies. */
 	struct task_struct *gp_kthread;		/* Task for grace periods. */
@@ -323,6 +327,8 @@ struct rcu_state {
 						/* rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
 
+	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */
+
 	struct mutex exp_mutex;			/* Serialize expedited GP. */
 	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
 	unsigned long expedited_sequence;	/* Take a ticket. */
@@ -355,7 +361,7 @@ struct rcu_state {
 	const char *name;			/* Name of structure. */
 	char abbr;				/* Abbreviated name. */
 
-	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
 						/* Synchronize offline with */
 						/* GP pre-initialization. */
 };
@@ -502,7 +502,8 @@ static void synchronize_rcu_expedited_wait(void)
 		if (synchronize_rcu_expedited_wait_once(1))
 			return;
 		rcu_for_each_leaf_node(rnp) {
-			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
+			mask = READ_ONCE(rnp->expmask);
+			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
 				rdp = per_cpu_ptr(&rcu_data, cpu);
 				if (rdp->rcu_forced_tick_exp)
 					continue;
@@ -656,7 +657,7 @@ static void rcu_exp_handler(void *unused)
 	 */
 	if (!depth) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
-		    rcu_dynticks_curr_cpu_in_eqs()) {
+		    rcu_is_cpu_rrupt_from_idle()) {
 			rcu_report_exp_rdp(rdp);
 		} else {
 			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
@@ -1169,7 +1169,7 @@ void __init rcu_init_nohz(void)
 	struct rcu_data *rdp;
 
 #if defined(CONFIG_NO_HZ_FULL)
-	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+	if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
 		need_rcu_nocb_mask = true;
 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
 
@@ -1226,6 +1226,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 	raw_spin_lock_init(&rdp->nocb_gp_lock);
 	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
 	rcu_cblist_init(&rdp->nocb_bypass);
+	mutex_init(&rdp->nocb_gp_kthread_mutex);
 }
 
 /*
@@ -1238,6 +1239,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_data *rdp_gp;
 	struct task_struct *t;
+	struct sched_param sp;
 
 	if (!rcu_scheduler_fully_active || !rcu_nocb_is_setup)
 		return;
@@ -1247,20 +1249,30 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 		return;
 
 	/* If we didn't spawn the GP kthread first, reorganize! */
+	sp.sched_priority = kthread_prio;
 	rdp_gp = rdp->nocb_gp_rdp;
+	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
 	if (!rdp_gp->nocb_gp_kthread) {
 		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
 				"rcuog/%d", rdp_gp->cpu);
-		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
+		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
+			mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 			return;
+		}
 		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
+		if (kthread_prio)
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	}
+	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
 	/* Spawn the kthread for this CPU. */
 	t = kthread_run(rcu_nocb_cb_kthread, rdp,
 			"rcuo%c/%d", rcu_state.abbr, cpu);
 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
 		return;
 
+	if (kthread_prio)
+		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	WRITE_ONCE(rdp->nocb_cb_kthread, t);
 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
 }
@@ -1348,7 +1360,7 @@ static void __init rcu_organize_nocb_kthreads(void)
  */
 void rcu_bind_current_to_nocb(void)
 {
-	if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
+	if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
 		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
 }
 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
@@ -330,7 +330,7 @@ void rcu_note_context_switch(bool preempt)
 		 * then queue the task as required based on the states
 		 * of any ongoing and expedited grace periods.
 		 */
-		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
+		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
 		trace_rcu_preempt_task(rcu_state.name,
 				       t->pid,
@@ -556,16 +556,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 
-		/* Unboost if we were boosted. */
-		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
-
 		/*
 		 * If this was the last task on the expedited lists,
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
 			rcu_report_exp_rnp(rnp, true);
+
+		/* Unboost if we were boosted. */
+		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
+			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -773,7 +773,6 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 	int cpu;
 	int i;
 	struct list_head *lhp;
-	bool onl;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp1;
 
@@ -797,9 +796,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 	pr_cont("\n");
 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
 		rdp = per_cpu_ptr(&rcu_data, cpu);
-		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
 		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
-			cpu, ".o"[onl],
+			cpu, ".o"[rcu_rdp_cpu_online(rdp)],
 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
 	}
@@ -996,12 +994,15 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
  */
 static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 #ifdef CONFIG_RCU_BOOST
 	struct sched_param sp;
 
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
+
+	WRITE_ONCE(rdp->rcuc_activity, jiffies);
 }
 
 #ifdef CONFIG_RCU_BOOST
@@ -1172,15 +1173,14 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	struct sched_param sp;
 	struct task_struct *t;
 
+	mutex_lock(&rnp->boost_kthread_mutex);
 	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
-		return;
+		goto out;
 
-	rcu_state.boost = 1;
-
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
 			   "rcub/%d", rnp_index);
 	if (WARN_ON_ONCE(IS_ERR(t)))
-		return;
+		goto out;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
@@ -1188,6 +1188,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+
+ out:
+	mutex_unlock(&rnp->boost_kthread_mutex);
 }
 
 /*
@@ -1210,14 +1213,16 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 		return;
 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
+	mutex_lock(&rnp->boost_kthread_mutex);
 	for_each_leaf_node_possible_cpu(rnp, cpu)
 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
 		    cpu != outgoingcpu)
 			cpumask_set_cpu(cpu, cm);
 	cpumask_and(cm, cm, housekeeping_cpumask(HK_FLAG_RCU));
-	if (cpumask_weight(cm) == 0)
+	if (cpumask_empty(cm))
 		cpumask_copy(cm, housekeeping_cpumask(HK_FLAG_RCU));
 	set_cpus_allowed_ptr(t, cm);
+	mutex_unlock(&rnp->boost_kthread_mutex);
 	free_cpumask_var(cm);
 }
 
@@ -379,6 +379,15 @@ static bool rcu_is_gp_kthread_starving(unsigned long *jp)
 	return j > 2 * HZ;
 }
 
+static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
+{
+	unsigned long j = jiffies - READ_ONCE(rdp->rcuc_activity);
+
+	if (jp)
+		*jp = j;
+	return j > 2 * HZ;
+}
+
 /*
  * Print out diagnostic information for the specified stalled CPU.
  *
@@ -430,6 +439,29 @@ static void print_cpu_stall_info(int cpu)
 	       falsepositive ? " (false positive?)" : "");
 }
 
+static void rcuc_kthread_dump(struct rcu_data *rdp)
+{
+	int cpu;
+	unsigned long j;
+	struct task_struct *rcuc;
+
+	rcuc = rdp->rcu_cpu_kthread_task;
+	if (!rcuc)
+		return;
+
+	cpu = task_cpu(rcuc);
+	if (cpu_is_offline(cpu) || idle_cpu(cpu))
+		return;
+
+	if (!rcu_is_rcuc_kthread_starving(rdp, &j))
+		return;
+
+	pr_err("%s kthread starved for %ld jiffies\n", rcuc->comm, j);
+	sched_show_task(rcuc);
+	if (!trigger_single_cpu_backtrace(cpu))
+		dump_cpu_task(cpu);
+}
+
 /* Complain about starvation of grace-period kthread. */
 static void rcu_check_gp_kthread_starvation(void)
 {
@@ -601,6 +633,9 @@ static void print_cpu_stall(unsigned long gps)
 	rcu_check_gp_kthread_expired_fqs_timer();
 	rcu_check_gp_kthread_starvation();
 
+	if (!use_softirq)
+		rcuc_kthread_dump(rdp);
+
 	rcu_dump_cpu_stacks();
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -407,6 +407,13 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
 }
 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
 
+void finish_rcuwait(struct rcuwait *w)
+{
+	rcu_assign_pointer(w->task, NULL);
+	__set_current_state(TASK_RUNNING);
+}
+EXPORT_SYMBOL_GPL(finish_rcuwait);
+
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
 void init_rcu_head(struct rcu_head *head)
 {
@@ -911,7 +911,7 @@ void torture_kthread_stopping(char *title)
 {
 	char buf[128];
 
-	snprintf(buf, sizeof(buf), "Stopping %s", title);
+	snprintf(buf, sizeof(buf), "%s is stopping", title);
 	VERBOSE_TOROUT_STRING(buf);
 	while (!kthread_should_stop()) {
 		torture_shutdown_absorb(title);
@@ -931,12 +931,14 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
 	int ret = 0;
 
 	VERBOSE_TOROUT_STRING(m);
-	*tp = kthread_run(fn, arg, "%s", s);
+	*tp = kthread_create(fn, arg, "%s", s);
 	if (IS_ERR(*tp)) {
 		ret = PTR_ERR(*tp);
 		TOROUT_ERRSTRING(f);
 		*tp = NULL;
+		return ret;
 	}
+	wake_up_process(*tp); // Process is sleeping, so ordering provided.
 	torture_shuffle_task_register(*tp);
 	return ret;
 }
@@ -10,7 +10,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@kernel.org>
 
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' |
+egrep 'Badness|WARNING:|Warn|BUG|===========|BUG: KCSAN:|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' |
 grep -v 'ODEBUG: ' |
 grep -v 'This means that this is a DEBUG kernel and it is' |
 grep -v 'Warning: unable to open an initial console' |
@@ -47,8 +47,8 @@ else
 	exit 1
 fi
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 dryrun=
@@ -49,8 +49,8 @@ fi
 mkdir $resdir/$ds
 echo Results directory: $resdir/$ds
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 echo Using all `identify_qemu_vcpus` CPUs.
 
@@ -22,8 +22,8 @@ T=${TMPDIR-/tmp}/kvm-end-run-stats.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 default_starttime="`get_starttime`"
 starttime="${2-default_starttime}"
@@ -30,10 +30,16 @@ editor=${EDITOR-vi}
 files=
 for i in ${rundir}/*/Make.out
 do
+	scenariodir="`dirname $i`"
+	scenariobasedir="`echo ${scenariodir} | sed -e 's/\.[0-9]*$//'`"
 	if egrep -q "error:|warning:|^ld: .*undefined reference to" < $i
 	then
 		egrep "error:|warning:|^ld: .*undefined reference to" < $i > $i.diags
 		files="$files $i.diags $i"
+	elif ! test -f ${scenariobasedir}/vmlinux
+	then
+		echo No ${scenariobasedir}/vmlinux file > $i.diags
+		files="$files $i.diags $i"
 	fi
 done
 if test -n "$files"
@@ -25,7 +25,7 @@ stopstate="`grep 'End-test grace-period state: g' $i/console.log 2> /dev/null |
 	    tail -1 | sed -e 's/^\[[ 0-9.]*] //' |
 	    awk '{ print \"[\" $1 \" \" $5 \" \" $6 \" \" $7 \"]\"; }' |
 	    tr -d '\012\015'`"
-fwdprog="`grep 'rcu_torture_fwd_prog n_max_cbs: ' $i/console.log 2> /dev/null | sed -e 's/^\[[^]]*] //' | sort -k3nr | head -1 | awk '{ print $2 " " $3 }'`"
+fwdprog="`grep 'rcu_torture_fwd_prog n_max_cbs: ' $i/console.log 2> /dev/null | sed -e 's/^\[[^]]*] //' | sort -k3nr | head -1 | awk '{ print $2 " " $3 }' | tr -d '\015'`"
 if test -z "$ngps"
 then
 	echo "$configfile ------- " $stopstate
@@ -19,8 +19,8 @@ then
 	exit 1
 fi
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 starttime="`get_starttime`"
@@ -108,8 +108,8 @@ else
 		cat $T/kvm-again.sh.out | tee -a "$oldrun/remote-log"
 		exit 2
 	fi
-	cp -a "$rundir" "$KVM/res/"
-	oldrun="$KVM/res/$ds"
+	cp -a "$rundir" "$RCUTORTURE/res/"
+	oldrun="$RCUTORTURE/res/$ds"
 fi
 echo | tee -a "$oldrun/remote-log"
 echo " ----" kvm-again.sh output: "(`date`)" | tee -a "$oldrun/remote-log"
@@ -155,18 +155,23 @@ do
 	echo Downloading tarball to $i `date` | tee -a "$oldrun/remote-log"
 	cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -"
 	ret=$?
-	if test "$ret" -ne 0
-	then
-		echo Unable to download $T/binres.tgz to system $i, waiting and then retrying. | tee -a "$oldrun/remote-log"
+	tries=0
+	while test "$ret" -ne 0
+	do
+		echo Unable to download $T/binres.tgz to system $i, waiting and then retrying. $tries prior retries. | tee -a "$oldrun/remote-log"
 		sleep 60
 		cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -"
 		ret=$?
 		if test "$ret" -ne 0
 		then
-			echo Unable to download $T/binres.tgz to system $i, giving up. | tee -a "$oldrun/remote-log"
-			exit 10
+			if test "$tries" > 5
+			then
+				echo Unable to download $T/binres.tgz to system $i, giving up. | tee -a "$oldrun/remote-log"
+				exit 10
+			fi
 		fi
-	fi
+		tries=$((tries+1))
+	done
 done
 
 # Function to check for presence of a file on the specified system.
@@ -25,15 +25,15 @@ LANG=en_US.UTF-8; export LANG
 
 dur=$((30*60))
 dryrun=""
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`"
 TORTURE_DEFCONFIG=defconfig
 TORTURE_BOOT_IMAGE=""
 TORTURE_BUILDONLY=
-TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD
+TORTURE_INITRD="$RCUTORTURE/initrd"; export TORTURE_INITRD
 TORTURE_KCONFIG_ARG=""
 TORTURE_KCONFIG_GDB_ARG=""
 TORTURE_BOOT_GDB_ARG=""
@@ -262,7 +262,7 @@ else
 	exit 1
 fi
 
-CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
+CONFIGFRAG=${RCUTORTURE}/configs/${TORTURE_SUITE}; export CONFIGFRAG
 
 defaultconfigs="`tr '\012' ' ' < $CONFIGFRAG/CFLIST`"
 if test -z "$configs"
@@ -272,7 +272,7 @@ fi
 
 if test -z "$resdir"
 then
-	resdir=$KVM/res
+	resdir=$RCUTORTURE/res
 fi
 
 # Create a file of test-name/#cpus pairs, sorted by decreasing #cpus.
@@ -280,7 +280,7 @@ configs_derep=
 for CF in $configs
 do
 	case $CF in
-	[0-9]\**|[0-9][0-9]\**|[0-9][0-9][0-9]\**)
+	[0-9]\**|[0-9][0-9]\**|[0-9][0-9][0-9]\**|[0-9][0-9][0-9][0-9]\**)
 		config_reps=`echo $CF | sed -e 's/\*.*$//'`
 		CF1=`echo $CF | sed -e 's/^[^*]*\*//'`
 		;;
@@ -386,7 +386,7 @@ END {
 # Generate a script to execute the tests in appropriate batches.
 cat << ___EOF___ > $T/script
 CONFIGFRAG="$CONFIGFRAG"; export CONFIGFRAG
-KVM="$KVM"; export KVM
+RCUTORTURE="$RCUTORTURE"; export RCUTORTURE
 PATH="$PATH"; export PATH
 TORTURE_ALLOTED_CPUS="$TORTURE_ALLOTED_CPUS"; export TORTURE_ALLOTED_CPUS
 TORTURE_BOOT_IMAGE="$TORTURE_BOOT_IMAGE"; export TORTURE_BOOT_IMAGE
@@ -569,7 +569,7 @@ ___EOF___
 awk < $T/cfgcpu.pack \
 	-v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
 	-v CONFIGDIR="$CONFIGFRAG/" \
-	-v KVM="$KVM" \
+	-v RCUTORTURE="$RCUTORTURE" \
 	-v ncpus=$cpus \
 	-v jitter="$jitter" \
 	-v rd=$resdir/$ds/ \
@@ -138,6 +138,16 @@ then
 	then
 		summary="$summary Bugs: $n_bugs"
 	fi
+	n_kcsan=`egrep -c 'BUG: KCSAN: ' $file`
+	if test "$n_kcsan" -ne 0
+	then
+		if test "$n_bugs" = "$n_kcsan"
+		then
+			summary="$summary (all bugs kcsan)"
+		else
+			summary="$summary KCSAN: $n_kcsan"
+		fi
+	fi
 	n_calltrace=`grep -c 'Call Trace:' $file`
 	if test "$n_calltrace" -ne 0
 	then
@@ -13,8 +13,8 @@
 scriptname=$0
 args="$*"
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`"
@@ -37,7 +37,7 @@ configs_scftorture=
 kcsan_kmake_args=
 
 # Default compression, duration, and apportionment.
-compress_kasan_vmlinux="`identify_qemu_vcpus`"
+compress_concurrency="`identify_qemu_vcpus`"
 duration_base=10
 duration_rcutorture_frac=7
 duration_locktorture_frac=1
@@ -67,12 +67,12 @@ function doyesno () {
 
 usage () {
 	echo "Usage: $scriptname optional arguments:"
-	echo "       --compress-kasan-vmlinux concurrency"
+	echo "       --compress-concurrency concurrency"
 	echo "       --configs-rcutorture \"config-file list w/ repeat factor (3*TINY01)\""
 	echo "       --configs-locktorture \"config-file list w/ repeat factor (10*LOCK01)\""
 	echo "       --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\""
-	echo "       --doall"
-	echo "       --doallmodconfig / --do-no-allmodconfig"
+	echo "       --do-all"
+	echo "       --do-allmodconfig / --do-no-allmodconfig"
 	echo "       --do-clocksourcewd / --do-no-clocksourcewd"
 	echo "       --do-kasan / --do-no-kasan"
 	echo "       --do-kcsan / --do-no-kcsan"
@@ -91,9 +91,9 @@ usage () {
 while test $# -gt 0
 do
 	case "$1" in
-	--compress-kasan-vmlinux)
-		checkarg --compress-kasan-vmlinux "(concurrency level)" $# "$2" '^[0-9][0-9]*$' '^error'
-		compress_kasan_vmlinux=$2
+	--compress-concurrency)
+		checkarg --compress-concurrency "(concurrency level)" $# "$2" '^[0-9][0-9]*$' '^error'
+		compress_concurrency=$2
 		shift
 		;;
 	--config-rcutorture|--configs-rcutorture)
@@ -414,8 +414,14 @@ nfailures=0
 echo FAILURES: | tee -a $T/log
 if test -s "$T/failures"
 then
-	cat "$T/failures" | tee -a $T/log
+	awk < "$T/failures" -v sq="'" '{ print "echo " sq $0 sq; print "sed -e " sq "1,/^ --- .* Test summary:$/d" sq " " $2 "/log | grep Summary: | sed -e " sq "s/^[^S]*/ /" sq; }' | sh | tee -a $T/log | tee "$T/failuresum"
 	nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`"
+	grep "^ Summary: " "$T/failuresum" |
+		grep -v '^ Summary: Bugs: [0-9]* (all bugs kcsan)$' > "$T/nonkcsan"
+	if test -s "$T/nonkcsan"
+	then
+		nonkcsanbug="yes"
+	fi
 	ret=2
 fi
 if test "$do_kcsan" = "yes"
@@ -424,12 +430,16 @@ then
 fi
 echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
 echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log
+if test -z "$nonkcsanbug" && test -s "$T/failuresum"
+then
+	echo " All bugs were KCSAN failures."
+fi
 tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
-if test -n "$tdir" && test $compress_kasan_vmlinux -gt 0
+if test -n "$tdir" && test $compress_concurrency -gt 0
 then
 	# KASAN vmlinux files can approach 1GB in size, so compress them.
-	echo Looking for KASAN files to compress: `date` > "$tdir/log-xz" 2>&1
-	find "$tdir" -type d -name '*-kasan' -print > $T/xz-todo
+	echo Looking for K[AC]SAN files to compress: `date` > "$tdir/log-xz" 2>&1
+	find "$tdir" -type d -name '*-k[ac]san' -print > $T/xz-todo
 	ncompresses=0
 	batchno=1
 	if test -s $T/xz-todo
@@ -447,7 +457,7 @@ then
 		do
 			xz "$j" >> "$tdir/log-xz" 2>&1 &
 			ncompresses=$((ncompresses+1))
-			if test $ncompresses -ge $compress_kasan_vmlinux
+			if test $ncompresses -ge $compress_concurrency
 			then
 				echo Waiting for batch $batchno of $ncompresses compressions `date` | tee -a "$tdir/log-xz" | tee -a $T/log
 				wait
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=4
+CONFIG_NR_CPUS=3
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
@@ -1 +1,2 @@
 rcutorture.torture_type=srcu
+rcutorture.fwd_progress=3
@@ -1,2 +1,4 @@
 rcutorture.torture_type=srcud
 rcupdate.rcu_self_test=1
+rcutorture.fwd_progress=3
+srcutree.big_cpu_lim=5
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=4
+CONFIG_NR_CPUS=5
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n