mirror of https://github.com/torvalds/linux.git, synced 2025-12-07 20:06:24 +00:00
The following build error occurred during an alpha build:

net/core/sock.c:274:36: error: initializer element is not constant

Dave Anglin says:
> Here is the line in sock.i:
>
> struct static_key memalloc_socks = ((struct static_key) { .enabled =
> ((atomic_t) { (0) }) });

The line above contains two compound literals and uses a designated
initializer to initialize the field enabled. A compound literal is not a
constant expression, so it cannot initialize an object with static
storage duration; C99 further requires that a compound literal occurring
outside the body of a function have an initializer list consisting of
constant expressions.
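
The failure can be reduced to the following sketch; struct K is a
hypothetical stand-in for struct static_key, and whether a given GCC
accepts the first form as an extension varies by version, which is why
the error only appears with some compilers:

	struct K { atomic_t a; };

	/* error: a compound literal is not a constant expression */
	struct K bad  = ((struct K) { .a = ((atomic_t) { 0 }) });

	/* OK at file scope: a plain braced initializer is constant */
	struct K good = { .a = { 0 } };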
Cc: <stable@vger.kernel.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Michael Cree <mcree@orcon.net.nz>
Acked-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
259 lines
5.9 KiB
C
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

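/*
 * The cast through a volatile pointer forces atomic_read() to perform
 * a real load each time it is used; note that neither atomic_read()
 * nor atomic_set() implies any memory barrier.
 */
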
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

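/*
 * ldl_l/stl_c (and ldq_l/stq_c below) are Alpha's load-locked/
 * store-conditional pair: stl_c writes 0 to its source register when
 * the store fails, so "beq %0,2f" branches to the out-of-line retry.
 */
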
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value
 */
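/*
 * Each *_return variant issues smp_mb() both before and after the
 * load-locked/store-conditional sequence, making it a full barrier.
 */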
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

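/* cmpxchg() and xchg() themselves come from <asm/cmpxchg.h>, included above. */
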
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed, zero otherwise (unlike
 * __atomic_add_unless(), which returns the old value of @v).
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */
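
For illustration only (this snippet is not part of the header), a
hypothetical caller might use this API as a simple reference count;
the example_* names are invented for the example:

	#include <linux/atomic.h>

	static atomic_t example_refs = ATOMIC_INIT(1);

	static void example_get(void)
	{
		atomic_inc(&example_refs);	/* take a reference */
	}

	static int example_put(void)
	{
		/* atomic_dec_and_test() is true when the count hits zero */
		if (atomic_dec_and_test(&example_refs))
			return 1;	/* last reference dropped */
		return 0;
	}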