locking/atomic: treewide: use raw_atomic*_<op>()
Now that we have raw_atomic*_<op>() definitions, there's no need to use arch_atomic*_<op>() definitions outside of the low-level atomic definitions.

Move treewide users of arch_atomic*_<op>() over to the equivalent raw_atomic*_<op>().

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-19-mark.rutland@arm.com
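For illustration only, a minimal sketch of the call-site pattern the commit applies treewide, assuming a hypothetical caller with its own atomic_t counter. arch_atomic_read() and raw_atomic_read() are the interfaces named in the commit; the file-level names (example_stat, example_stat_read_old/new) are made up for this sketch and do not appear in the patch.

/*
 * Hedged sketch, not taken from the patch: a caller outside the
 * low-level atomic definitions that previously called the arch_
 * prefixed helper directly now calls the equivalent raw_ wrapper.
 */
#include <linux/atomic.h>

static atomic_t example_stat = ATOMIC_INIT(0);

/* Before the conversion: direct use of the arch_ prefixed helper. */
static inline int example_stat_read_old(void)
{
	return arch_atomic_read(&example_stat);
}

/* After the conversion: same semantics via the raw_ wrapper. */
static inline int example_stat_read_new(void)
{
	return raw_atomic_read(&example_stat);
}

As the commit message states, the raw_atomic*_<op>() form is equivalent; the change is only which interface callers outside the low-level atomic definitions are expected to use.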
commit 0f613bfa82
parent c9268ac615
committed by Peter Zijlstra
@@ -119,7 +119,7 @@ extern void ct_idle_exit(void);
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
 }
 
 /*
@@ -128,7 +128,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static __always_inline unsigned long ct_state_inc(int incby)
 {
-	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+	return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
 }
 
 static __always_inline bool warn_rcu_enter(void)