1// SPDX-License-Identifier: GPL-2.0
2/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */
3
4#include <linux/bpf.h>
5#include <bpf/bpf_helpers.h>
6#include "bpf_misc.h"
7
SEC("socket")
__description("raw_stack: no skb_load_bytes")
__success
__failure_unpriv __msg_unpriv("invalid read from stack R6 off=-8 size=8")
__naked void stack_no_skb_load_bytes(void)
{
	/* Set up r2-r4 as if calling bpf_skb_load_bytes(skb, 4, fp-8, 8),
	 * but never make the call, then read 8 bytes from the (still
	 * uninitialized) stack slot at fp-8.  Privileged programs are
	 * allowed to read uninitialized stack (__success); unprivileged
	 * ones must be rejected with the __msg_unpriv error above.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 8;						\
	/* Call to skb_load_bytes() omitted. */		\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	::: __clobber_all);
}
25
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len")
__failure __msg("R4 min value is negative")
__naked void skb_load_bytes_negative_len(void)
{
	/* bpf_skb_load_bytes(skb, 4, fp-8, -8): the length argument in R4
	 * is a negative constant, which the verifier must reject before
	 * the helper is ever invoked.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = -8;					\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
44
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len 2")
__failure __msg("R4 min value is negative")
__naked void load_bytes_negative_len_2(void)
{
	/* Same as the previous test, but the negative length is supplied
	 * as ~0 (i.e. -1) through an __imm_const substitution rather than
	 * a literal -8.  Must fail the same way.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = %[__imm_0];				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__imm_0, ~0)
	: __clobber_all);
}
64
SEC("tc")
__description("raw_stack: skb_load_bytes, zero len")
__failure __msg("R4 invalid zero-sized read: u64=[0,0]")
__naked void skb_load_bytes_zero_len(void)
{
	/* Length argument R4 is exactly 0.  The helper's memory argument
	 * does not permit zero-sized access here, so the verifier must
	 * reject it with the zero-sized-read message above.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
83
SEC("tc")
__description("raw_stack: skb_load_bytes, no init")
__success __retval(0)
__naked void skb_load_bytes_no_init(void)
{
	/* Pass an uninitialized 8-byte stack buffer (fp-8) to the helper.
	 * bpf_skb_load_bytes() is allowed to write into uninitialized
	 * (raw) stack, and the slot becomes readable afterwards, so this
	 * must load and succeed.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
102
SEC("tc")
__description("raw_stack: skb_load_bytes, init")
__success __retval(0)
__naked void stack_skb_load_bytes_init(void)
{
	/* Same as the "no init" variant, but the stack slot at fp-8 is
	 * explicitly initialized (with 0xcafe) before being handed to the
	 * helper.  An already-initialized buffer must also be accepted.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = 0xcafe;					\
	*(u64*)(r6 + 0) = r3;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
123
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs around bounds")
__success __retval(0)
__naked void bytes_spilled_regs_around_bounds(void)
{
	/* Spill the skb pointer (r1) just below (fp-24) and just above
	 * (fp-8) the 8-byte buffer at fp-16 that the helper writes to.
	 * The helper write must not clobber the neighbouring spill slots:
	 * both refilled pointers must still be usable for ctx accesses
	 * (mark / priority loads) afterwards.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}
150
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption")
__failure __msg("R0 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void load_bytes_spilled_regs_corruption(void)
{
	/* Spill the skb pointer into the very slot (fp-8) that the helper
	 * then overwrites.  After the call the slot holds unknown packet
	 * data, so refilling it yields a scalar, and dereferencing it as
	 * a ctx pointer must be rejected.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
173
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption 2")
__failure __msg("R3 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bytes_spilled_regs_corruption_2(void)
{
	/* Spill the skb pointer into three consecutive slots; the middle
	 * one (fp-16, i.e. r6+0) is overwritten by the helper.  The two
	 * untouched spills (R0, R2) must remain valid ctx pointers, while
	 * the overwritten one refills as a scalar and its dereference
	 * (via R3) must be rejected.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r3 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]);	\
	r0 += r3;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}
206
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs + data")
__success __retval(0)
__naked void load_bytes_spilled_regs_data(void)
{
	/* Same layout as "spilled regs corruption 2", but the overwritten
	 * middle slot is only used as a scalar (added into R0), never
	 * dereferenced.  Using helper-written data as plain data is fine,
	 * so this must succeed.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r3 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	r0 += r3;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}
236
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 1")
__failure __msg("invalid indirect access to stack R3 off=-513 size=8")
__naked void load_bytes_invalid_access_1(void)
{
	/* Buffer pointer at fp-513 is below the lowest legal stack offset
	 * (the stack is limited to 512 bytes), so the helper's indirect
	 * stack access must be rejected.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -513;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
255
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 2")
__failure __msg("invalid indirect access to stack R3 off=-1 size=8")
__naked void load_bytes_invalid_access_2(void)
{
	/* Buffer at fp-1 with size 8 would extend past the top of the
	 * stack frame (above fp), which the verifier must reject.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -1;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
274
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 3")
__failure __msg("R4 min value is negative")
__naked void load_bytes_invalid_access_3(void)
{
	/* Both the stack offset and the length are 0xffffffff.  Per the
	 * expected message, the length in R4 is treated as negative
	 * (sign-interpreted), so the verifier rejects it before checking
	 * the stack pointer.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += 0xffffffff;				\
	r3 = r6;					\
	r4 = 0xffffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
293
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 4")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_4(void)
{
	/* Huge positive length (0x7fffffff) with buffer at fp-1: far
	 * larger than any stack buffer could be, so the verifier reports
	 * an unbounded memory access for R4.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -1;					\
	r3 = r6;					\
	r4 = 0x7fffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
313
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 5")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_5(void)
{
	/* Same oversized length (0x7fffffff) as access 4, but with the
	 * buffer at the lowest valid stack offset (fp-512).  A valid base
	 * pointer does not help: the length still exceeds the stack, so
	 * the same unbounded-access error is expected.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 0x7fffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
333
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 6")
__failure __msg("invalid zero-sized read")
__naked void load_bytes_invalid_access_6(void)
{
	/* Zero length with a valid buffer base at fp-512: zero-sized
	 * reads are still rejected regardless of where the buffer points.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
352
SEC("tc")
__description("raw_stack: skb_load_bytes, large access")
__success __retval(0)
__naked void skb_load_bytes_large_access(void)
{
	/* Largest legal case: a 512-byte buffer spanning the entire stack
	 * frame (fp-512 .. fp).  This is exactly within bounds and must
	 * be accepted.
	 */
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 512;					\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}
371
/* Required license declaration: GPL-only helpers are used above. */
char _license[] SEC("license") = "GPL";
373