/*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif
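
/*
 * Background note: "bcr mask,0" is a branch-on-condition with register 0
 * as the target, which by definition does not branch; the architecture
 * specifies that it performs serialization instead, making it usable as
 * a full memory barrier. The mask-14 form above is the z196 "fast" variant
 * that skips checkpoint synchronization.
 */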
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
#define rmb() mb()
#define wmb() mb()
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
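
/*
 * s390 does not reorder dependent loads, so the data-dependency
 * barriers below are no-ops (they exist for the benefit of Alpha).
 */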
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
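
/*
 * Minimal usage sketch for the release/acquire pair above ("flag" and
 * "data" are hypothetical variables, not part of this header):
 *
 *	CPU 0				CPU 1
 *	-----				-----
 *	data = 42;			while (!smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);		cpu_relax();
 *					r1 = data;	// observes 42
 *
 * The release store orders the store to "data" before the store to
 * "flag"; the acquire load orders the load of "flag" before the load
 * of "data", so r1 is guaranteed to be 42 once the loop exits.
 */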
#endif /* __ASM_BARRIER_H */