kernel-ark/arch/m68k/include/asm/atomic.h
Greg Ungerer 7224c0d104 m68k: include asm/cmpxchg.h in our m68k atomic.h
After commit 9ffc93f203 ("Remove all #inclusions of asm/system.h")
building m68k targets we get:

  CC      init/main.o
In file included from include/linux/mm.h:15:0,
                 from include/linux/ring_buffer.h:5,
                 from include/linux/ftrace_event.h:4,
                 from include/trace/syscall.h:6,
                 from include/linux/syscalls.h:78,
                 from init/main.c:16:
include/linux/debug_locks.h: In function ‘__debug_locks_off’:
include/linux/debug_locks.h:16:2: error: implicit declaration of function ‘xchg’

There is no indirect inclusion of the new asm/cmpxchg.h for m68k here.
Most other architectures include asm/cmpxchg.h in their asm/atomic.h;
m68k currently does not. Including it in atomic.h fixes all m68k build
problems.
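
The fix is the single include now visible near the top of the header
below. As a sketch of the hunk (reconstructed from the file as shown,
not quoted from the commit):

   #include <linux/types.h>
   #include <linux/irqflags.h>
  +#include <asm/cmpxchg.h>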

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
2012-04-01 22:57:49 +02:00

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI "d"
#else
#define ASM_DI "di"
#endif
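
/*
 * "d" allows only a data register operand; adding "i" also lets gcc
 * emit the value as an immediate (e.g. "addl #4,<ea>"), which some
 * ColdFire cores cannot encode for memory destinations.
 */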
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
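
/*
 * The *_and_test variants rely on the m68k Scc instructions: seq/slt
 * set the byte operand to 0xff when the condition holds and to 0x00
 * otherwise, so "return c != 0" turns that into a truth value.
 */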
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS
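
/*
 * With CAS available, the *_return operations are a load/modify/casl
 * retry loop: casl stores the new value only if *v still holds the
 * expected old value, and on failure reloads the compare operand with
 * the current contents, so the loop just retries.
 */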
static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */
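
/*
 * Without CAS, and with no SMP to worry about (see the note at the
 * top), masking interrupts around a plain read-modify-write sequence
 * is enough to make these operations atomic.
 */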
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
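
/*
 * The mask operations act on a plain unsigned long rather than an
 * atomic_t: atomic_clear_mask() ands in ~mask, atomic_set_mask() ors
 * the mask in.
 */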
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
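
/*
 * Add a to v, unless v is u; returns the old value.  Built on
 * atomic_cmpxchg(), so it works with either implementation selected
 * above.
 */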
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* __ARCH_M68K_ATOMIC__ */