@@ -577,4 +577,158 @@ __naked void partial_stack_load_preserves_zeros(void)
 	: __clobber_common);
 }
 
+char two_byte_buf[2] SEC(".data.two_byte_buf");
+
+SEC("raw_tp")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__success
+/* make sure fp-8 is IMPRECISE fake register spill */
+__msg("3: (7a) *(u64 *)(r10 -8) = 1 ; R10=fp0 fp-8_w=1")
+/* and fp-16 is spilled IMPRECISE const reg */
+__msg("5: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16_w=1")
+/* validate load from fp-8, which was initialized using BPF_ST_MEM */
+__msg("8: (79) r2 = *(u64 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=1")
+__msg("9: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 8: (79) r2 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
+/* note, fp-8 is precise, fp-16 is not yet precise, we'll get there */
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_w=1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7a) *(u64 *)(r10 -8) = 1")
+__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+/* validate load from fp-16, which was initialized using BPF_STX_MEM */
+__msg("12: (79) r2 = *(u64 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=1")
+__msg("13: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 12: (79) r2 = *(u64 *)(r10 -16)")
+__msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 8: (79) r2 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
+/* now both fp-8 and fp-16 are precise, very good */
+__msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_rw=P1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-16 before 5: (7b) *(u64 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
+__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__naked void stack_load_preserves_const_precision(void)
+{
+	asm volatile (
+		/* establish checkpoint with state that has no stack slots;
+		 * if we bubble up to this state without finding desired stack
+		 * slot, then it's a bug and should be caught
+		 */
+		"goto +0;"
+
+		/* fp-8 is const 1 *fake* register */
+		".8byte %[fp8_st_one];" /* LLVM-18+: *(u64 *)(r10 -8) = 1; */
+
+		/* fp-16 is const 1 register */
+		"r0 = 1;"
+		"*(u64 *)(r10 -16) = r0;"
+
+		/* force checkpoint to check precision marks preserved in parent states */
+		"goto +0;"
+
+		/* load single U64 from aligned FAKE_REG=1 slot */
+		"r1 = %[two_byte_buf];"
+		"r2 = *(u64 *)(r10 -8);"
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+		/* load single U64 from aligned REG=1 slot */
+		"r1 = %[two_byte_buf];"
+		"r2 = *(u64 *)(r10 -16);"
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+		"r0 = 0;"
+		"exit;"
+	:
+	: __imm_ptr(two_byte_buf),
+	  __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 1))
+	: __clobber_common);
+}
+
+SEC("raw_tp")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__success
+/* make sure fp-8 is 32-bit FAKE subregister spill */
+__msg("3: (62) *(u32 *)(r10 -8) = 1 ; R10=fp0 fp-8=????1")
+/* but fp-16 is spilled IMPRECISE const reg */
+__msg("5: (63) *(u32 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16=????1")
+/* validate load from fp-8, which was initialized using BPF_ST_MEM */
+__msg("8: (61) r2 = *(u32 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=????1")
+__msg("9: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 8: (61) r2 = *(u32 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16=????1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (63) *(u32 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (62) *(u32 *)(r10 -8) = 1")
+__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+/* validate load from fp-16, which was initialized using BPF_STX_MEM */
+__msg("12: (61) r2 = *(u32 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=????1")
+__msg("13: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 12: (61) r2 = *(u32 *)(r10 -16)")
+__msg("mark_precise: frame0: regs= stack=-16 before 11: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 8: (61) r2 = *(u32 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
+__msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16_r=????P1")
+__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
+__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
+__msg("mark_precise: frame0: regs= stack=-16 before 5: (63) *(u32 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
+__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__naked void stack_load_preserves_const_precision_subreg(void)
+{
+	asm volatile (
+		/* establish checkpoint with state that has no stack slots;
+		 * if we bubble up to this state without finding desired stack
+		 * slot, then it's a bug and should be caught
+		 */
+		"goto +0;"
+
+		/* fp-8 is const 1 *fake* SUB-register */
+		".8byte %[fp8_st_one];" /* LLVM-18+: *(u32 *)(r10 -8) = 1; */
+
+		/* fp-16 is const 1 SUB-register */
+		"r0 = 1;"
+		"*(u32 *)(r10 -16) = r0;"
+
+		/* force checkpoint to check precision marks preserved in parent states */
+		"goto +0;"
+
+		/* load single U32 from aligned FAKE_REG=1 slot */
+		"r1 = %[two_byte_buf];"
+		"r2 = *(u32 *)(r10 -8);"
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+		/* load single U32 from aligned REG=1 slot */
+		"r1 = %[two_byte_buf];"
+		"r2 = *(u32 *)(r10 -16);"
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+		"r0 = 0;"
+		"exit;"
+	:
+	: __imm_ptr(two_byte_buf),
+	  __imm_insn(fp8_st_one, BPF_ST_MEM(BPF_W, BPF_REG_FP, -8, 1)) /* 32-bit spill */
+	: __clobber_common);
+}
+
 char _license[] SEC("license") = "GPL";