/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that the C language cannot guarantee for us.
 * Useful for resource counting etc.
 */

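/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */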
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here;
	 * it is a non-inlined function that increases binary size and stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

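/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */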
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

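/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */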
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

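/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */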
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

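/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns true if the result is
 * zero, or false for all other cases.
 */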
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

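/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */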
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

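/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */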
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

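/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0,
 * or false for all other cases.
 */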
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

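/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */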
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

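/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is
 * negative, or false when the result is greater than or equal to zero.
 */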
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

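/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */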
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

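/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */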
#define arch_atomic_sub_return(i, v) arch_atomic_add_return(-(i), v)

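/**
 * arch_atomic_fetch_add - add integer and return original value
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the value of @v before the addition.
 */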
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

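/**
 * arch_atomic_fetch_sub - subtract integer and return original value
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the value of @v before
 * the subtraction.
 */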
#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)

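/**
 * arch_atomic_cmpxchg - compare and exchange
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if it was equal to @old, and returns the
 * value of @v before the operation.
 */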
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

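/**
 * arch_atomic_try_cmpxchg - compare and exchange, with success indication
 * @v: pointer of type atomic_t
 * @old: pointer to the expected value
 * @new: new value
 *
 * Atomically sets @v to @new if it was equal to *@old and returns true;
 * on failure, *@old is updated to the current value of @v and false is
 * returned.
 */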
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

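/**
 * arch_atomic_xchg - exchange the value of an atomic variable
 * @v: pointer of type atomic_t
 * @new: value to set
 *
 * Atomically sets @v to @new and returns the old value.
 */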
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

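/**
 * arch_atomic_and - atomic bitwise AND
 * @i: integer value to AND with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i.
 */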
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

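/**
 * arch_atomic_fetch_and - atomic bitwise AND, returning original value
 * @i: integer value to AND with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i via a cmpxchg loop and returns the
 * value of @v before the operation.
 */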
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

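/**
 * arch_atomic_or - atomic bitwise OR
 * @i: integer value to OR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i.
 */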
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

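/**
 * arch_atomic_fetch_or - atomic bitwise OR, returning original value
 * @i: integer value to OR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i via a cmpxchg loop and returns the
 * value of @v before the operation.
 */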
static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

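/**
 * arch_atomic_xor - atomic bitwise XOR
 * @i: integer value to XOR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i.
 */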
static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

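/**
 * arch_atomic_fetch_xor - atomic bitwise XOR, returning original value
 * @i: integer value to XOR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i via a cmpxchg loop and returns the
 * value of @v before the operation.
 */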
static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */