// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/sock.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
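/* Local copies of the kernel's sizeof_field()/offsetofend() helpers:
 * offsetofend(TYPE, MEMBER) is the offset of the first byte past
 * MEMBER, used below to build just-out-of-bounds load offsets.
 */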

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} map_reuseport_array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockhash SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_xskmap SEC(".maps");

struct val {
	int cnt;
	struct bpf_spin_lock l;
};

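/* sk_storage maps must be created with BPF_F_NO_PREALLOC and
 * max_entries of 0; the value type here also carries a bpf_spin_lock.
 */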
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(max_entries, 0);
	__type(key, int);
	__type(value, struct val);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");

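/* In cgroup/skb programs skb->sk has verifier type
 * PTR_TO_SOCK_COMMON_OR_NULL: it must be NULL-checked before any
 * dereference, and only sock_common fields are readable through it.
 */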
SEC("cgroup/skb")
__description("skb->sk: no NULL check")
__failure __msg("invalid mem access 'sock_common_or_null'")
__failure_unpriv
__naked void skb_sk_no_null_check(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	r0 = *(u32*)(r1 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_family]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}

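/* sk->type is a fullsock-only field: the verifier rejects it through
 * a plain skb->sk (sock_common) pointer, so the access below must go
 * through bpf_sk_fullsock() first.
 */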
SEC("cgroup/skb")
__description("skb->sk: sk->type [fullsock field]")
__failure __msg("invalid sock_common access")
__failure_unpriv
__naked void sk_sk_type_fullsock_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

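/* bpf_sk_fullsock() itself requires a non-NULL sock_common argument,
 * and its return value is again or-NULL, so both the input and the
 * result need their own NULL checks.
 */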
SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_sk_fullsock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_family]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_state]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
	: __clobber_all);
}

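/* dst_port is a 16-bit field followed by 2 bytes of zero padding.
 * Half and byte loads within the field are allowed, a 4-byte load is
 * kept working for backward compatibility, but any load that falls
 * into the padding is rejected.
 */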
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r2 = *(u8*)(r0 + %[bpf_sock_dst_port]);		\
	r2 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_protocol]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
	: __clobber_all);
}

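/* bpf_tcp_sock() returns a tcp_sock-or-NULL pointer; after the NULL
 * check, reads must stay within the fields of struct bpf_tcp_sock and
 * use a size that matches the field.
 */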
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_tcp_sock];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l2_%=;				\
	exit;						\
l2_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

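/* bpf_sk_release() may only be passed a referenced socket, i.e. one
 * the verifier is tracking a reference for (e.g. from a sockmap
 * lookup). skb->sk and pointers derived from it carry no reference,
 * so releasing them is rejected.
 */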
SEC("tc")
__description("bpf_sk_release(skb->sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_fullsock_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_tcp_sock_skb_sk(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

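/* bpf_sk_storage_get(): the value argument must be NULL or a pointer
 * into the stack holding the initial value; passing a plain scalar is
 * rejected with "expected=fp".
 */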
SEC("tc")
__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
__success __retval(0)
__naked void sk_null_0_value_null(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 0;						\
	r3 = 0;						\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 1;						\
	r3 = 1;						\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
	asm volatile ("					\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r4 = 1;						\
	r3 = r10;					\
	r3 += -8;					\
	r2 = r0;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_sk_storage_get];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

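/* Generic map helpers do not work on sk_storage maps: map_type 24
 * (BPF_MAP_TYPE_SK_STORAGE) may only be used through the dedicated
 * bpf_sk_storage_*() helpers.
 */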
SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[sk_storage_map] ll;			\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(sk_storage_map)
	: __clobber_all);
}

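/* An xskmap lookup yields a bpf_xdp_sock pointer; after the NULL
 * check, fields such as queue_id can be read from it directly.
 */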
SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_xskmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_xskmap),
	  __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
	: __clobber_all);
}

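/* Unlike xskmap, a sockmap/sockhash lookup returns a referenced
 * socket: the verifier tracks the reference and rejects the program
 * if it can exit without a matching bpf_sk_release().
 */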
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockhash)
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockmap] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockmap),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_sockhash] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = r0;					\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockhash),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

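/* bpf_sk_select_reuseport() accepts REUSEPORT_SOCKARRAY as well as
 * SOCKMAP and SOCKHASH maps as the map argument.
 */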
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_reuseport_array] ll;			\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_reuseport_array)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_sockmap] ll;				\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
__success
__naked void reuseport_ctx_sockhash_key_flags(void)
{
	asm volatile ("					\
	r4 = 0;						\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r3 = r10;					\
	r3 += -4;					\
	r2 = %[map_sockhash] ll;			\
	call %[bpf_sk_select_reuseport];		\
	exit;						\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockhash)
	: __clobber_all);
}

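/* Each bpf_skc_to_*() cast returns its own or-NULL pointer: checking
 * one return value (r8) says nothing about another (r7), so the load
 * through the unchecked r7 below is rejected.
 */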
SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	r6 = r1;					\
	call %[bpf_skc_to_tcp_sock];			\
	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_skc_to_tcp_request_sock];		\
	r8 = r0;					\
	if r8 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r7 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skc_to_tcp_request_sock),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";