locking: Remove spin_lock_flags() etc
parisc, ia64 and powerpc32 are the only remaining architectures that
provide custom arch_{spin,read,write}_lock_flags() functions, which are
meant to re-enable interrupts while waiting for a spinlock.

However, none of these can actually run into this codepath, because
it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
of those combinations are possible on the three architectures.

Going back in the git history, it appears that arch/mn10300 may have
been able to run into this code path, but there is a good chance that
it never worked. On the architectures that still exist, it was already
impossible to hit back in 2008 after the introduction of
CONFIG_GENERIC_LOCKBREAK, and possibly earlier.

As this is all dead code, just remove it and the helper functions built
around it. For arch/ia64, the inline asm could be cleaned up, but it
seems safer to leave it untouched.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Helge Deller <deller@gmx.de>  # parisc
Link: https://lore.kernel.org/r/20211022120058.1031690-1-arnd@kernel.org
parent 5197fcd09a
commit f98a3dccfc
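For context before the diff: every removed *_lock_flags() helper followed roughly the pattern sketched below, restoring the caller's saved interrupt state while spinning on a contended lock (the parisc and powerpc32 versions removed in this patch do exactly that). This is an illustrative sketch built from generic kernel primitives, not code from the patch, and the function name is made up:

/*
 * Illustrative sketch only (hypothetical helper, not from this patch):
 * the general shape of an arch_spin_lock_flags() implementation.
 * "flags" holds the interrupt state saved by the caller's
 * local_irq_save(), so restoring it may re-enable interrupts while
 * the lock is contended.
 */
static inline void sketch_spin_lock_flags(arch_spinlock_t *lock,
					  unsigned long flags)
{
	while (!arch_spin_trylock(lock)) {
		local_irq_restore(flags);	/* may re-enable IRQs */
		while (arch_spin_is_locked(lock))
			cpu_relax();		/* spin, IRQs serviceable */
		local_irq_disable();		/* mask again, then retry */
	}
}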
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -124,18 +124,13 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
-						  unsigned long flags)
-{
-	arch_spin_lock(lock);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
 		"br.few 3f\n"
@@ -157,13 +152,8 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define arch_read_lock_flags arch_read_lock_flags
-#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
-
 #else /* !ASM_SUPPORTED */
 
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-
 #define arch_read_lock(rw)							\
 do {										\
 	arch_rwlock_t *__read_lock_ptr = (rw);					\
@@ -186,8 +176,10 @@ do {										\
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
 		"mov ar.ccv = r0\n"
@@ -210,9 +202,6 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define arch_write_lock_flags arch_write_lock_flags
-#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-
 #define arch_write_trylock(rw)							\
 ({										\
 	register long result;							\
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -19,9 +19,6 @@
 
 #include <asm/qrwlock.h>
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -23,21 +23,6 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 		continue;
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					 unsigned long flags)
-{
-	volatile unsigned int *a;
-
-	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
-		while (*a == 0)
-			if (flags & PSW_SM_I) {
-				local_irq_enable();
-				local_irq_disable();
-			}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	}
 }
 
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long flags_dis;
-
-	while (1) {
-		if (likely(__arch_spin_trylock(lock) == 0))
-			break;
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
-		do {
-			HMT_low();
-			if (is_shared_processor())
-				splpar_spin_yield(lock);
-		} while (unlikely(lock->slock != 0));
-		HMT_medium();
-		local_irq_restore(flags_dis);
-	}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("# arch_spin_unlock\n\t"
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -67,14 +67,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lp)
 		arch_spin_lock_wait(lp);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
-{
-	if (!arch_spin_trylock_once(lp))
-		arch_spin_lock_wait(lp);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	if (!arch_spin_trylock_once(lp))
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -481,23 +481,6 @@ do {								\
 
 #endif /* CONFIG_LOCK_STAT */
 
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags)	\
-	LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags)	\
-	lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
 #ifdef CONFIG_PROVE_LOCKING
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
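With both definitions of LOCK_CONTENDED_FLAGS removed above, the irqsave slow paths in the hunks that follow fall back to plain LOCK_CONTENDED(). As a rough sketch of the net effect (simplified: the lockdep annotation is omitted, and with CONFIG_LOCK_STAT=n LOCK_CONTENDED(_lock, try, lock) expands to just lock(_lock)):

/*
 * Simplified sketch (not from the patch) of what the irqsave slow path
 * now reduces to: interrupts stay masked for the whole acquisition,
 * since no *_lock_flags() variant exists to re-enable them.
 */
static inline unsigned long sketch_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	do_raw_spin_lock(lock);		/* spins with interrupts masked */
	return flags;
}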
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -30,31 +30,16 @@ do {								\
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
 extern int do_raw_read_trylock(rwlock_t *lock);
 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
 extern int do_raw_write_trylock(rwlock_t *lock);
 extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
-#endif
-
 # define do_raw_read_lock(rwlock)	do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
-	do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
 # define do_raw_read_unlock(rwlock)	do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 # define do_raw_write_lock(rwlock)	do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
-	do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
 # define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -157,8 +157,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
-			     do_raw_read_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 	return flags;
 }
 
@@ -184,8 +183,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
-			     do_raw_write_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 	return flags;
 }
 
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -177,7 +177,6 @@ do {								\
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
@@ -188,18 +187,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 	mmiowb_spin_lock();
 }
 
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
-	__acquire(lock);
-	arch_spin_lock_flags(&lock->raw_lock, *flags);
-	mmiowb_spin_lock();
-}
-
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&(lock)->raw_lock);
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	/*
-	 * On lockdep we dont want the hand-coded irq-enable of
-	 * do_raw_spin_lock_flags() code, because lockdep assumes
-	 * that interrupts are not re-enabled during lock-acquire:
-	 */
-#ifdef CONFIG_LOCKDEP
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
-	do_raw_spin_lock_flags(lock, &flags);
-#endif
 	return flags;
 }
 
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -378,8 +378,7 @@ unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
-				do_raw_spin_lock_flags, &flags);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 	return flags;
 }
 EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
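Callers are unaffected by any of this: spin_lock_irqsave() keeps its usual form, and on the three architectures named above interrupts now simply remain disabled while a contended lock is being acquired. A minimal usage example for illustration (the structure and field names are hypothetical):

struct counter {			/* hypothetical example struct */
	raw_spinlock_t lock;
	unsigned long value;
};

static void counter_inc(struct counter *c)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&c->lock, flags);
	c->value++;			/* critical section, IRQs masked */
	raw_spin_unlock_irqrestore(&c->lock, flags);
}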