// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

/* Tests below exercise BPF verifier precision-mark propagation between
 * scalar registers that share a scalar ID.  Each __msg() must match a
 * line of the log-level-2 verifier output.
 */

/* Check that precision marks propagate through scalar IDs.
 * Registers r{0,1,2} have the same scalar ID at the moment when r0 is
 * marked to be precise, this mark is immediately propagated to r{1,2}.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* force r0 to be precise, this immediately marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Same as precision_same_state, but mark propagates through state /
 * parent state boundary.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1")
__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* force checkpoint */
	"goto +0;"
	/* force r0 to be precise, this immediately marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Same as precision_same_state, but break one of the
 * links, note that r1 is absent from regs=... in __msg below.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0")
__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state_broken_link(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* break link for r1, this is the only line that differs
	 * compared to the previous test
	 */
	"r1 = 0;"
	/* force r0 to be precise, this immediately marks r2 as
	 * precise as well because of the shared ID; r1 is not marked
	 * since its link was broken above (note r1 absent from the
	 * regs=... lists in __msg above)
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Same as precision_same_state_broken_link, but with state /
 * parent state boundary.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0")
__msg("frame0: parent state regs=r0,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state_broken_link(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* force checkpoint, although link between r1 and r{0,2} is
	 * broken by the next statement current precision tracking
	 * algorithm can't react to it and propagates mark for r1 to
	 * the parent state.
	 */
	"goto +0;"
	/* break link for r1, this is the only line that differs
	 * compared to precision_cross_state()
	 */
	"r1 = 0;"
	/* force r0 to be precise: in the current state only r2 shares
	 * the ID (r1 was overwritten above), so regs=r0,r2 here; r1
	 * still gets marked when backtracking crosses into the
	 * checkpoint (parent) state, see __msg for insn 4 above
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Check that precision marks propagate through scalar IDs.
 * Use the same scalar ID in multiple stack frames, check that
 * precision information is propagated up the call stack.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1")
__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
 * looks for all registers with frame2.r1.id in the current state
 */
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_many_frames(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r6.id */
	"r1 = r0;"
	"r6 = r0;"
	"call precision_many_frames__foo;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

static __naked __noinline __used
void
precision_many_frames__foo(void)
{
	asm volatile (
	/* conflate one of the register numbers (r6) with outer frame,
	 * to verify that those are tracked independently
	 */
	"r6 = r1;"
	"r7 = r1;"
	"call precision_many_frames__bar;"
	"exit"
	::: __clobber_all);
}

static __naked __noinline __used
void precision_many_frames__bar(void)
{
	asm volatile (
	/* force r1 to be precise, this immediately marks:
	 * - bar frame r1
	 * - foo frame r{1,6,7}
	 * - main frame r{1,6}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"r0 = 0;"
	"exit;"
	::: __clobber_all);
}

/* Check that scalars with the same IDs are marked precise on stack as
 * well as in registers.
 */
SEC("socket")
__success __log_level(2)
/* foo frame */
__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == fp[-8].id */
	"r1 = r0;"
	"*(u64*)(r10 - 8) = r1;"
	"call precision_stack__foo;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

static __naked __noinline __used
void precision_stack__foo(void)
{
	asm volatile (
	/* conflate one of the stack slots (fp-8) with outer frame,
	 * to verify that those are tracked independently
	 */
	"*(u64*)(r10 - 8) = r1;"
	"*(u64*)(r10 - 16) = r1;"
	/* force r1 to be
precise, this immediately marks:
	 * - foo frame r1,fp{-8,-16}
	 * - main frame r1,fp{-8}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit"
	::: __clobber_all);
}

/* Use two separate scalar IDs to check that these are propagated
 * independently.
 */
SEC("socket")
__success __log_level(2)
/* r{6,7} */
__msg("11: (0f) r3 += r7")
__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
__msg("12: (0f) r3 += r9")
__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7")
/* ... skip some insns ... */
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_two_ids(void)
{
	asm volatile (
	/* r6 = random number up to 0xff
	 * r6.id == r7.id
	 */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"r7 = r0;"
	/* same, but for r{8,9} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r8 = r0;"
	"r9 = r0;"
	/* clear r0 id */
	"r0 = 0;"
	/* force checkpoint */
	"goto +0;"
	"r3 = r10;"
	/* force r7 to be precise, this also marks r6 */
	"r3 += r7;"
	/* force r9 to be precise, this also marks r8 */
	"r3 += r9;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Verify that check_ids() is used by regsafe() for scalars.
 *
 * r9 = ... some pointer with range X ...
 * r6 = ... unbound scalar ID=a ...
 * r7 = ... unbound scalar ID=b ...
 * if (r6 > r7) goto +1
 * r7 = r6
 * if (r7 > X) goto exit
 * r9 += r6
 * ... access memory using r9 ...
 *
 * The memory access is safe only if r6 is bounded (the "r7 > X" check
 * transfers the range to r6 only when r6 and r7 share an ID),
 * which is true for one branch and not true for another.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	"r7 = r6;"
"l1_%=:"
	/* if r7 > 4 ...; transfers range to r6 on one execution path
	 * but does not transfer on another
	 */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6], r6 is not always bounded */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Similar to check_ids_in_regsafe.
 * The l0 could be reached in two states:
 *
 *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
 *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
 *
 * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
 * This example would be considered safe without changes to
 * mark_chain_precision() to track scalar values with equal IDs.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe_2(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r8 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r8 = r0;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* scratch .id from r0 */
	"r0 = 0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* tie r6 and r7 .id */
	"r6 = r7;"
"l0_%=:"
	/* if r7 > 4 exit(0) */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6] */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
"l1_%=:"
	/* tie r6 and r8 .id */
	"r6 = r8;"
	"goto l0_%=;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Check that scalar IDs *are not* generated on register to register
 * assignments if source register is a constant.
 *
 * If such IDs *are* generated the 'l1' below would be reached in
 * two states:
 *
 *   (1) r1{.id=A}, r2{.id=A}
 *   (2) r1{.id=C}, r2{.id=C}
 *
 * Thus forcing 'if r1 == r2' verification twice.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"r1 = 0;"
	"r1 = r1;"
	"r3 = r1;"
	"r4 = r1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"r1 = 0;"
	"r2 = 0;"
	"r3 = r1;"
	"r4 = r2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if r3 == r4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"w1 = 0;"
	"w1 = w1;"
	"w3 = w1;"
	"w4 = w1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"w1 = 0;"
	"w2 = 0;"
	"w3 = w1;"
	"w4 = w2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if w3 == w4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state.
For this test:
 * - cached state has no id on r1
 * - new state has a unique id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (57) r1 &= 255")
__msg("8: (bf) r2 = r10")
__msg("from 6 to 8: safe")
__msg("processed 12 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_cur(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique */
	"r0 = 0;"
	"if r6 > 7 goto l0_%=;"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has no id (cached state)
	 * - second: r1 has a unique id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state.
For this test:
 * - cached state has a unique id on r1
 * - new state has no id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (05) goto pc+1")
__msg("9: (bf) r2 = r10")
__msg("9: safe")
__msg("processed 13 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_old(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique */
	"r0 = 0;"
	"if r6 > 7 goto l1_%=;"
	"goto l0_%=;"
"l1_%=:"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has a unique id (cached state)
	 * - second: r1 has no id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Check that two different scalar IDs in a verified state can't be
 * mapped to the same scalar ID in current state.
 */
SEC("socket")
__success __log_level(2)
/* The exit instruction should be reachable from two states,
 * use two matches and "processed .. insns" to ensure this.
 */
__msg("13: (95) exit")
__msg("13: (95) exit")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
	asm volatile (
	/* Give unique scalar IDs to r{6,7} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r7 = r0;"
	"r0 = 0;"
	/* Maybe make r{6,7} IDs identical */
	"if r6 > r7 goto l0_%=;"
	"goto l1_%=;"
"l0_%=:"
	"r6 = r7;"
"l1_%=:"
	/* Mark r{6,7} precise.
	 * Get here in two states:
	 * - first: r6{.id=A}, r7{.id=B} (cached state)
	 * - second: r6{.id=A}, r7{.id=A}
	 * Currently we don't want to consider such states equivalent.
	 * Thus "exit;" would be verified twice.
	 */
	"r2 = r10;"
	"r2 += r6;"
	"r2 += r7;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";