locking/lockdep: Remove unused @nested argument from lock_release()
Since the following commit:
b4adfe8e05 ("locking/lockdep: Remove unused argument in __lock_release")
@nested is no longer used in lock_release(), so remove it from all
lock_release() calls and friends.
Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: alexander.levin@microsoft.com
Cc: daniel@iogearbox.net
Cc: davem@davemloft.net
Cc: dri-devel@lists.freedesktop.org
Cc: duyuyang@gmail.com
Cc: gregkh@linuxfoundation.org
Cc: hannes@cmpxchg.org
Cc: intel-gfx@lists.freedesktop.org
Cc: jack@suse.com
Cc: jlbec@evilplan.org
Cc: joonas.lahtinen@linux.intel.com
Cc: joseph.qi@linux.alibaba.com
Cc: jslaby@suse.com
Cc: juri.lelli@redhat.com
Cc: maarten.lankhorst@linux.intel.com
Cc: mark@fasheh.com
Cc: mhocko@kernel.org
Cc: mripard@kernel.org
Cc: ocfs2-devel@oss.oracle.com
Cc: rodrigo.vivi@intel.com
Cc: sean@poorly.run
Cc: st@kernel.org
Cc: tj@kernel.org
Cc: tytso@mit.edu
Cc: vdavydov.dev@gmail.com
Cc: vincent.guittot@linaro.org
Cc: viro@zeniv.linux.org.uk
Link: https://lkml.kernel.org/r/1568909380-32199-1-git-send-email-cai@lca.pw
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -349,8 +349,7 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			 int trylock, int read, int check,
 			 struct lockdep_map *nest_lock, unsigned long ip);
 
-extern void lock_release(struct lockdep_map *lock, int nested,
-			 unsigned long ip);
+extern void lock_release(struct lockdep_map *lock, unsigned long ip);
 
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
@@ -428,7 +427,7 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
 }
 
 # define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
-# define lock_release(l, n, i) do { } while (0)
+# define lock_release(l, i) do { } while (0)
 # define lock_downgrade(l, i) do { } while (0)
 # define lock_set_class(l, n, k, s, i) do { } while (0)
 # define lock_set_subclass(l, s, i) do { } while (0)
@@ -591,42 +590,42 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
 #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define spin_release(l, n, i) lock_release(l, n, i)
+#define spin_release(l, i) lock_release(l, i)
 
 #define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
 #define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define rwlock_release(l, n, i) lock_release(l, n, i)
+#define rwlock_release(l, i) lock_release(l, i)
 
 #define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
 #define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define seqcount_release(l, n, i) lock_release(l, n, i)
+#define seqcount_release(l, i) lock_release(l, i)
 
 #define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
 #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define mutex_release(l, n, i) lock_release(l, n, i)
+#define mutex_release(l, i) lock_release(l, i)
 
 #define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
 #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
 #define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-#define rwsem_release(l, n, i) lock_release(l, n, i)
+#define rwsem_release(l, i) lock_release(l, i)
 
 #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
-#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock) \
 do { \
 	typecheck(struct lockdep_map *, &(lock)->dep_map); \
 	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
-	lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+	lock_release(&(lock)->dep_map, _THIS_IP_); \
 } while (0)
 # define might_lock_read(lock) \
 do { \
 	typecheck(struct lockdep_map *, &(lock)->dep_map); \
 	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
-	lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+	lock_release(&(lock)->dep_map, _THIS_IP_); \
 } while (0)
 
 #define lockdep_assert_irqs_enabled() do { \
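To make the call-site impact concrete, the following is a minimal userspace sketch, not kernel code: struct lockdep_map is stubbed out, the printf bodies and the lock_release_old() name are illustrative assumptions, and only the two signature shapes mirror the diff above. Since @nested was already ignored internally after b4adfe8e05, callers simply drop the argument; no behavior changes.

/*
 * Userspace sketch of the lock_release() signature change.
 * Everything here except the parameter lists is a stand-in.
 */
#include <stdio.h>

struct lockdep_map { const char *name; };	/* stub, not the real struct */

/* Mock of the old declaration; @nested was accepted but never used. */
static void lock_release_old(struct lockdep_map *lock, int nested,
			     unsigned long ip)
{
	(void)nested;	/* ignored, as in the pre-patch kernel */
	printf("release %s from ip=%#lx\n", lock->name, ip);
}

/* Mock of the new declaration after this patch. */
static void lock_release(struct lockdep_map *lock, unsigned long ip)
{
	printf("release %s from ip=%#lx\n", lock->name, ip);
}

int main(void)
{
	struct lockdep_map map = { .name = "demo_lock" };

	/* Before: every caller passed a meaningless 0 or 1. */
	lock_release_old(&map, 1, 0x1234UL);

	/* After: the argument is dropped at each call site. */
	lock_release(&map, 0x1234UL);
	return 0;
}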