diff --git a/elf_reader_test.go b/elf_reader_test.go
index 3ec5faa49..c1e5beefb 100644
--- a/elf_reader_test.go
+++ b/elf_reader_test.go
@@ -739,6 +739,9 @@ func TestLibBPFCompat(t *testing.T) {
 		case "btf__core_reloc_type_id___missing_targets.o",
 			"btf__core_reloc_flavors__err_wrong_name.o":
 			valid = false
+		case "btf__core_reloc_ints___err_bitfield.o":
+			// Bitfields are now valid.
+			valid = true
 		default:
 			valid = !strings.Contains(name, "___err_")
 		}
diff --git a/internal/btf/core.go b/internal/btf/core.go
index 317e7c311..3694d2c36 100644
--- a/internal/btf/core.go
+++ b/internal/btf/core.go
@@ -1,6 +1,7 @@
 package btf
 
 import (
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"math"
@@ -17,7 +18,7 @@ import (
 
 // COREFixup is the result of computing a CO-RE relocation for a target.
 type COREFixup struct {
-	Kind   COREKind
+	Kind   FixupKind
 	Local  uint32
 	Target uint32
 	Poison bool
@@ -41,8 +42,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
 
 	switch class := ins.OpCode.Class(); class {
 	case asm.LdXClass, asm.StClass, asm.StXClass:
-		if want := int16(f.Local); want != ins.Offset {
-			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
+		if want := f.Local; !f.Kind.bitfield && int16(want) != ins.Offset {
+			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.Local)
 		}
 
 		if f.Target > math.MaxInt16 {
@@ -56,8 +57,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
 			return fmt.Errorf("not a dword-sized immediate load")
 		}
 
-		if want := int64(f.Local); want != ins.Constant {
-			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
+		if want := f.Local; !f.Kind.bitfield && int64(want) != ins.Constant {
+			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
 		}
 
 		ins.Constant = int64(f.Target)
@@ -74,8 +75,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
 			return fmt.Errorf("invalid source %s", src)
 		}
 
-		if want := int64(f.Local); want != ins.Constant {
-			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
+		if want := f.Local; !f.Kind.bitfield && int64(want) != ins.Constant {
+			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.Kind, ins)
 		}
 
 		if f.Target > math.MaxInt32 {
@@ -92,7 +93,7 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
 }
 
 func (f COREFixup) isNonExistant() bool {
-	return f.Kind.checksForExistence() && f.Target == 0
+	return f.Kind.coreKind.checksForExistence() && f.Target == 0
 }
 
 type COREFixups map[uint64]COREFixup
@@ -138,11 +139,11 @@ func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
 	return cpy, nil
 }
 
-// COREKind is the type of CO-RE relocation
-type COREKind uint32
+// coreKind is the type of CO-RE relocation as specified in BPF source code.
+type coreKind uint32
 
 const (
-	reloFieldByteOffset COREKind = iota /* field byte offset */
+	reloFieldByteOffset coreKind = iota /* field byte offset */
 	reloFieldByteSize                   /* field size in bytes */
 	reloFieldExists                     /* field existence in target kernel */
 	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
@@ -156,7 +157,11 @@ const (
 	reloEnumvalValue                    /* enum value integer value */
 )
 
-func (k COREKind) String() string {
+func (k coreKind) checksForExistence() bool {
+	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+}
+
+func (k coreKind) String() string {
 	switch k {
 	case reloFieldByteOffset:
 		return "byte_off"
@@ -187,8 +192,20 @@ func (k COREKind) String() string {
 	}
 }
 
-func (k COREKind) checksForExistence() bool {
-	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+// FixupKind is the type of CO-RE relocation.
+type FixupKind struct {
+	coreKind coreKind
+
+	// the relocation is for a bitfield. This disables some validation.
+	bitfield bool
+}
+
+func (fk FixupKind) String() string {
+	ck := fk.coreKind.String()
+	if fk.bitfield {
+		return ck + " (bitfield)"
+	}
+	return ck
 }
 
 func coreRelocate(local, target *Spec, relos CORERelos) (COREFixups, error) {
@@ -208,7 +225,7 @@ func coreRelocate(local, target *Spec, relos CORERelos) (COREFixups, error) {
 		}
 
 		result[uint64(relo.insnOff)] = COREFixup{
-			relo.kind,
+			FixupKind{coreKind: relo.kind},
			uint32(relo.typeID),
 			uint32(relo.typeID),
 			false,
@@ -241,7 +258,7 @@ func coreRelocate(local, target *Spec, relos CORERelos) (COREFixups, error) {
 
 		relos := relosByID[id]
 		targets := target.namedTypes[newEssentialName(localTypeName)]
-		fixups, err := coreCalculateFixups(localType, targets, relos)
+		fixups, err := coreCalculateFixups(local.byteOrder, localType, targets, relos)
 		if err != nil {
 			return nil, fmt.Errorf("relocate %s: %w", localType, err)
 		}
@@ -262,7 +279,7 @@ var errImpossibleRelocation = errors.New("impossible relocation")
 //
 // The best target is determined by scoring: the less poisoning we have to do
 // the better the target is.
-func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFixup, error) {
+func coreCalculateFixups(byteOrder binary.ByteOrder, local Type, targets []Type, relos CORERelos) ([]COREFixup, error) {
 	localID := local.ID()
 	local, err := copyType(local, skipQualifiersAndTypedefs)
 	if err != nil {
@@ -281,7 +298,7 @@ func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFix
 		score := 0 // lower is better
 		fixups := make([]COREFixup, 0, len(relos))
 		for _, relo := range relos {
-			fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
+			fixup, err := coreCalculateFixup(byteOrder, local, localID, target, targetID, relo)
 			if err != nil {
 				return nil, fmt.Errorf("target %s: %w", target, err)
 			}
@@ -317,7 +334,7 @@ func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFix
 		// targets at all. Poison everything!
 		bestFixups = make([]COREFixup, len(relos))
 		for i, relo := range relos {
-			bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
+			bestFixups[i] = COREFixup{Kind: FixupKind{coreKind: relo.kind}, Poison: true}
 		}
 	}
 
@@ -326,15 +343,15 @@ func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFix
 
 // coreCalculateFixup calculates the fixup for a single local type, target type
 // and relocation.
-func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo CORERelocation) (COREFixup, error) {
+func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo CORERelocation) (COREFixup, error) {
 	fixup := func(local, target uint32) (COREFixup, error) {
-		return COREFixup{relo.kind, local, target, false}, nil
+		return COREFixup{FixupKind{coreKind: relo.kind}, local, target, false}, nil
 	}
 	poison := func() (COREFixup, error) {
 		if relo.kind.checksForExistence() {
 			return fixup(1, 0)
 		}
-		return COREFixup{relo.kind, 0, 0, true}, nil
+		return COREFixup{Kind: FixupKind{coreKind: relo.kind}, Local: 0, Target: 0, Poison: true}, nil
 	}
 	zero := COREFixup{}
 
@@ -390,7 +407,22 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
 			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
 		}
 
-	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
+	case reloFieldSigned:
+		switch local.(type) {
+		case *Enum:
+			return fixup(1, 1)
+		case *Int:
+			return COREFixup{
+				FixupKind{coreKind: relo.kind},
+				uint32(local.(*Int).Encoding & Signed),
+				uint32(target.(*Int).Encoding & Signed),
+				false,
+			}, nil
+		default:
+			return fixup(0, 0)
+		}
+
+	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
 		if _, ok := target.(*Fwd); ok {
 			// We can't relocate fields using a forward declaration, so
 			// skip it. If a non-forward declaration is present in the BTF
@@ -405,13 +437,20 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
 		if err != nil {
 			return zero, fmt.Errorf("target %s: %w", target, err)
 		}
+		fixupField := func(local, target uint32) (COREFixup, error) {
+			fixupKind := FixupKind{
+				coreKind: relo.kind,
+				bitfield: localField.bitfieldSize > 0 || targetField.bitfieldSize > 0,
+			}
+			return COREFixup{Kind: fixupKind, Local: local, Target: target, Poison: false}, nil
+		}
 
 		switch relo.kind {
 		case reloFieldExists:
 			return fixup(1, 1)
 
 		case reloFieldByteOffset:
-			return fixup(localField.offset/8, targetField.offset/8)
+			return fixupField(localField.offset/8, targetField.offset/8)
 
 		case reloFieldByteSize:
 			localSize, err := Sizeof(localField.Type)
@@ -423,9 +462,23 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
 			if err != nil {
 				return zero, err
 			}
+			return fixupField(uint32(localSize), uint32(targetSize))
+
+		case reloFieldLShiftU64:
+			var target uint32
+			if byteOrder == binary.LittleEndian {
+				target = 64 - (targetField.bitfieldOffset + targetField.bitfieldSize - targetField.offset)
+			} else {
+				targetSize, err := Sizeof(targetField.Type)
+				if err != nil {
+					return zero, err
+				}
+				target = (8-uint32(targetSize))*8 + (targetField.bitfieldOffset - targetField.offset)
+			}
+			return fixupField(0, target)
 
-			return fixup(uint32(localSize), uint32(targetSize))
-
+		case reloFieldRShiftU64:
+			return fixupField(0, 64-targetField.bitfieldSize)
 		}
 	}
 
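
The lshift/rshift pair computed above is everything a program needs to extract a bitfield from an aligned load: shift the field's most significant bit up to bit 63, then shift the value back down to bit 0. A minimal sketch of the little-endian arithmetic, using hypothetical numbers (a 10-bit field at bit offset 35, read through an aligned 4-byte load at bit offset 32) rather than values taken from this patch:

package main

import "fmt"

func main() {
	const (
		offset         uint32 = 32 // bit offset of the aligned load
		bitfieldOffset uint32 = 35 // bit offset of the bitfield
		bitfieldSize   uint32 = 10 // width of the bitfield in bits
	)

	// The same formulas as the reloFieldLShiftU64/reloFieldRShiftU64 cases.
	lshift := 64 - (bitfieldOffset + bitfieldSize - offset) // 51
	rshift := 64 - bitfieldSize                             // 54

	// Pretend the zero-extended load contains the field value 0x2aa.
	var word uint64 = 0x2aa << (bitfieldOffset - offset)

	// The left shift discards the bits above the field, the right shift
	// discards the bits below it.
	fmt.Printf("%#x\n", (word << lshift) >> rshift) // 0x2aa
}

libbpf's BPF_CORE_READ_BITFIELD macro consumes the two relocated constants in exactly this way; for signed bitfields the final shift is arithmetic rather than logical.
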
@@ -509,8 +562,18 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
 }
 
 type coreField struct {
-	Type   Type
+	Type Type
+
+	// offset is the load offset of the field in bytes.
 	offset uint32
+
+	// bitfieldOffset is optional and is the offset of the bitfield in bits.
+	// Used with bitfieldSize to compute the shifts required to pull the
+	// bitfield out of the word loaded from 'offset'.
+	bitfieldOffset uint32
+
+	// bitfieldSize is the size of the bitfield in bits.
+	bitfieldSize uint32
 }
 
 func adjustOffset(base uint32, t Type, n int) (uint32, error) {
@@ -527,7 +590,7 @@ func adjustOffset(base uint32, t Type, n int) (uint32, error) {
 //
 // Returns the field and the offset of the field from the start of
 // target in bits.
-func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
+func coreFindField(local Type, localAcc coreAccessor, target Type) (coreField, coreField, error) {
 	// The first index is used to offset a pointer of the base type like
 	// when accessing an array.
 	localOffset, err := adjustOffset(0, local, localAcc[0])
@@ -579,10 +642,6 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
 				return coreField{}, coreField{}, err
 			}
 
-			if targetMember.BitfieldSize > 0 {
-				return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
-			}
-
 			local = localMember.Type
 			localMaybeFlex = acc == len(localMembers)-1
 			localOffset += localMember.OffsetBits
@@ -590,6 +649,51 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
 			targetMaybeFlex = last
 			targetOffset += targetMember.OffsetBits
 
+			if targetMember.BitfieldSize == 0 && localMember.BitfieldSize == 0 {
+				break
+			}
+
+			targetSize, err := Sizeof(target)
+			if err != nil {
+				return coreField{}, coreField{},
+					fmt.Errorf("could not get target size: %w", err)
+			}
+
+			targetBitfieldOffset := targetOffset
+			var targetBitfieldSize uint32
+
+			if targetMember.BitfieldSize > 0 {
+				// From the target BTF we know the size of the word containing the bitfield
+				// and the bitfield's offset in bits. Since the load is aligned, we can
+				// compute the load offset by:
+				// 1) converting the bit offset to bytes with a flooring division, yielding a byte-aligned offset, and
+				// 2) dividing and multiplying that offset by the load size, yielding an offset aligned to the load size.
+				targetBitfieldSize = targetMember.BitfieldSize
+				targetOffset = 8 * (targetOffset / 8 / uint32(targetSize) * uint32(targetSize))
+
+				// As a sanity check, verify that the chosen load captures the bitfield.
+				// This can only fail if one of two assumptions is broken: that the bitfield
+				// is no larger than its declared type, and that loads are aligned.
+				if targetOffset > targetBitfieldOffset ||
+					targetOffset+uint32(targetSize)*8 < targetBitfieldOffset+targetMember.BitfieldSize {
+					return coreField{}, coreField{},
+						fmt.Errorf("could not find load for bitfield: load of %d bytes at %d does not capture bitfield of size %d at %d",
+							targetSize, targetOffset, targetMember.BitfieldSize, targetBitfieldOffset)
+				}
+			} else {
+				// Going from a bitfield to a regular field. Since the local BTF declared it as
+				// a bitfield, "emulate" one in the target to compute the shifts correctly.
+				targetBitfieldSize = uint32(targetSize * 8)
+			}
+
+			if err := coreAreMembersCompatible(local, target); err != nil {
+				return coreField{}, coreField{}, err
+			}
+
+			return coreField{local, localOffset, localMember.OffsetBits, localMember.BitfieldSize},
+				coreField{target, targetOffset, targetBitfieldOffset, targetBitfieldSize},
+				nil
+
 		case *Array:
 			// For arrays, acc is the index in the target.
 			targetType, ok := target.(*Array)
@@ -634,7 +738,7 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
 		}
 	}
 
-	return coreField{local, localOffset}, coreField{target, targetOffset}, nil
+	return coreField{local, localOffset, 0, 0}, coreField{target, targetOffset, 0, 0}, nil
 }
 
 // coreFindMember finds a member in a composite type while handling anonymous
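
The flooring arithmetic in the hunk above is dense; here is a small standalone sketch, again with hypothetical numbers (an 8-bit bitfield at bit offset 45 whose declared type is 4 bytes wide):

package main

import "fmt"

// loadOffset mirrors targetOffset = 8 * (targetOffset / 8 / size * size):
// round the field's bit offset down to a byte, then down to a multiple of
// the load size, and convert back to bits.
func loadOffset(bitOffset, loadSize uint32) uint32 {
	return 8 * (bitOffset / 8 / loadSize * loadSize)
}

func main() {
	off := loadOffset(45, 4)
	fmt.Println(off) // 32: bit 45 -> byte 5 -> byte 4 -> bit 32

	// The sanity check from the hunk above: a 4-byte load at bit 32 spans
	// bits [32, 64), which captures an 8-bit field at bits [45, 53).
	fmt.Println(off <= 45 && 45+8 <= off+4*8) // true
}
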
diff --git a/internal/btf/core_test.go b/internal/btf/core_test.go
index 593dded26..c67142211 100644
--- a/internal/btf/core_test.go
+++ b/internal/btf/core_test.go
@@ -233,14 +233,24 @@ func TestCOREFindField(t *testing.T) {
 	aFields := []Member{
 		{Name: "foo", Type: ptr, OffsetBits: 1},
 		{Name: "bar", Type: u16, OffsetBits: 2},
+		{Name: "baz", Type: u32, OffsetBits: 32, BitfieldSize: 3},
+		{Name: "quux", Type: u32, OffsetBits: 35, BitfieldSize: 10},
+		{Name: "quuz", Type: u32, OffsetBits: 45, BitfieldSize: 8},
 	}
 	bFields := []Member{
 		{Name: "foo", Type: ptr, OffsetBits: 10},
 		{Name: "bar", Type: u32, OffsetBits: 20},
 		{Name: "other", OffsetBits: 4},
+		// baz is separated out from the other bitfields
+		{Name: "baz", Type: u32, OffsetBits: 64, BitfieldSize: 3},
+		// quux's type changes u32->u16
+		{Name: "quux", Type: u16, OffsetBits: 96, BitfieldSize: 10},
+		// quuz becomes a normal field
+		{Name: "quuz", Type: u16, OffsetBits: 112},
 	}
-	aStruct := &Struct{Members: aFields, Size: 2}
-	bStruct := &Struct{Members: bFields, Size: 7}
+
+	aStruct := &Struct{Members: aFields, Size: 48}
+	bStruct := &Struct{Members: bFields, Size: 80}
 	aArray := &Array{Nelems: 4, Type: u16}
 	bArray := &Array{Nelems: 3, Type: u32}
 
@@ -340,83 +350,83 @@ func TestCOREFindField(t *testing.T) {
 			aArray, bArray,
 			coreAccessor{0, 0},
-			coreField{u16, 0},
-			coreField{u32, 0},
+			coreField{u16, 0, 0, 0},
+			coreField{u32, 0, 0, 0},
 		},
 		{
 			"array[1]",
 			aArray, bArray,
 			coreAccessor{0, 1},
-			coreField{u16, bits(aArray.Type)},
-			coreField{u32, bits(bArray.Type)},
+			coreField{u16, bits(aArray.Type), 0, 0},
+			coreField{u32, bits(bArray.Type), 0, 0},
 		},
 		{
 			"array[0] with base offset",
 			aArray, bArray,
 			coreAccessor{1, 0},
-			coreField{u16, bits(aArray)},
-			coreField{u32, bits(bArray)},
+			coreField{u16, bits(aArray), 0, 0},
+			coreField{u32, bits(bArray), 0, 0},
 		},
 		{
 			"array[2] with base offset",
 			aArray, bArray,
 			coreAccessor{1, 2},
-			coreField{u16, bits(aArray) + 2*bits(aArray.Type)},
-			coreField{u32, bits(bArray) + 2*bits(bArray.Type)},
+			coreField{u16, bits(aArray) + 2*bits(aArray.Type), 0, 0},
+			coreField{u32, bits(bArray) + 2*bits(bArray.Type), 0, 0},
 		},
 		{
 			"flex array",
 			&Struct{Members: []Member{{Name: "foo", Type: &Array{Nelems: 0, Type: u16}}}},
 			&Struct{Members: []Member{{Name: "foo", Type: &Array{Nelems: 0, Type: u32}}}},
 			coreAccessor{0, 0, 9000},
-			coreField{u16, bits(u16) * 9000},
-			coreField{u32, bits(u32) * 9000},
+			coreField{u16, bits(u16) * 9000, 0, 0},
+			coreField{u32, bits(u32) * 9000, 0, 0},
 		},
 		{
 			"struct.0",
 			aStruct, bStruct,
 			coreAccessor{0, 0},
-			coreField{ptr, 1},
-			coreField{ptr, 10},
+			coreField{ptr, 1, 0, 0},
+			coreField{ptr, 10, 0, 0},
 		},
 		{
 			"struct.0 anon",
 			aStruct, &Struct{Members: anon(bStruct, 23)},
 			coreAccessor{0, 0},
-			coreField{ptr, 1},
-			coreField{ptr, 23 + 10},
+			coreField{ptr, 1, 0, 0},
+			coreField{ptr, 23 + 10, 0, 0},
 		},
 		{
 			"struct.0 with base offset",
 			aStruct, bStruct,
 			coreAccessor{3, 0},
-			coreField{ptr, 3*bits(aStruct) + 1},
-			coreField{ptr, 3*bits(bStruct) + 10},
+			coreField{ptr, 3*bits(aStruct) + 1, 0, 0},
+			coreField{ptr, 3*bits(bStruct) + 10, 0, 0},
 		},
 		{
 			"struct.1",
 			aStruct, bStruct,
 			coreAccessor{0, 1},
-			coreField{u16, 2},
-			coreField{u32, 20},
+			coreField{u16, 2, 0, 0},
+			coreField{u32, 20, 0, 0},
 		},
 		{
 			"struct.1 anon",
 			aStruct, &Struct{Members: anon(bStruct, 1)},
 			coreAccessor{0, 1},
-			coreField{u16, 2},
-			coreField{u32, 1 + 20},
+			coreField{u16, 2, 0, 0},
+			coreField{u32, 1 + 20, 0, 0},
 		},
 		{
 			"union.1",
 			&Union{Members: aFields, Size: 32}, &Union{Members: bFields, Size: 32},
 			coreAccessor{0, 1},
-			coreField{u16, 2},
-			coreField{u32, 20},
+			coreField{u16, 2, 0, 0},
+			coreField{u32, 20, 0, 0},
 		},
 		{
 			"interchangeable composites",
 			&Struct{
 				Members: []Member{
 					{Name: "foo", Type: &Struct{
 						Members: []Member{{Name: "bar", Type: u16}},
					}},
@@ -431,8 +441,29 @@ func TestCOREFindField(t *testing.T) {
 				},
 			},
 			coreAccessor{0, 0, 0, 0},
-			coreField{u16, 0},
-			coreField{u16, 0},
+			coreField{u16, 0, 0, 0},
+			coreField{u16, 0, 0, 0},
+		},
+		{
+			"struct.2 (bitfield baz)",
+			aStruct, bStruct,
+			coreAccessor{0, 2},
+			coreField{u32, 32, 32, 3},
+			coreField{u32, 64, 64, 3},
+		},
+		{
+			"struct.3 (bitfield quux)",
+			aStruct, bStruct,
+			coreAccessor{0, 3},
+			coreField{u32, 35, 35, 10},
+			coreField{u16, 96, 96, 10},
+		},
+		{
+			"struct.4 (bitfield quuz)",
+			aStruct, bStruct,
+			coreAccessor{0, 4},
+			coreField{u32, 45, 45, 8},
+			coreField{u16, 112, 112, 16},
 		},
 	}
 
@@ -522,10 +553,10 @@ func TestCORERelocation(t *testing.T) {
 			}
 
 			for offset, relo := range relos {
-				if relo.Local != relo.Target {
+				if want := relo.Local; !relo.Kind.bitfield && want != relo.Target {
 					// Since we're relocating against ourselves both values
 					// should match.
-					t.Errorf("offset %d: local %v doesn't match target %d", offset, relo.Local, relo.Target)
+					t.Errorf("offset %d: local %v doesn't match target %d (kind %s)", offset, relo.Local, relo.Target, relo.Kind)
 				}
 			}
 		})
diff --git a/internal/btf/ext_info.go b/internal/btf/ext_info.go
index be614e782..39ce7b068 100644
--- a/internal/btf/ext_info.go
+++ b/internal/btf/ext_info.go
@@ -435,14 +435,14 @@ type bpfCORERelo struct {
 	InsnOff      uint32
 	TypeID       TypeID
 	AccessStrOff uint32
-	Kind         COREKind
+	Kind         coreKind
 }
 
 type CORERelocation struct {
 	insnOff  uint32
 	typeID   TypeID
 	accessor coreAccessor
-	kind     COREKind
+	kind     coreKind
 }
 
 type CORERelos []CORERelocation
diff --git a/internal/btf/testdata/relocs.c b/internal/btf/testdata/relocs.c
index 24fae514c..7b559a076 100644
--- a/internal/btf/testdata/relocs.c
+++ b/internal/btf/testdata/relocs.c
@@ -238,3 +238,4 @@ __section("socket_filter/err_ambiguous") int err_ambiguous() {
 __section("socket_filter/err_ambiguous_flavour") int err_ambiguous_flavour() {
 	return bpf_core_type_id_kernel(struct ambiguous___flavour);
 }
+
diff --git a/internal/btf/testdata/relocs_read-eb.elf b/internal/btf/testdata/relocs_read-eb.elf
index d5e6c328f..b13859d74 100644
Binary files a/internal/btf/testdata/relocs_read-eb.elf and b/internal/btf/testdata/relocs_read-eb.elf differ
diff --git a/internal/btf/testdata/relocs_read-el.elf b/internal/btf/testdata/relocs_read-el.elf
index 50b6c886a..78b9c32ac 100644
Binary files a/internal/btf/testdata/relocs_read-el.elf and b/internal/btf/testdata/relocs_read-el.elf differ
diff --git a/internal/btf/testdata/relocs_read.c b/internal/btf/testdata/relocs_read.c
index ccb4f499a..0f6cb4f1e 100644
--- a/internal/btf/testdata/relocs_read.c
+++ b/internal/btf/testdata/relocs_read.c
@@ -1,7 +1,10 @@
 #include "../../../testdata/common.h"
+#include "bpf_core_read.h"
 
 #define core_access __builtin_preserve_access_index
 
+char _license[] __attribute__((section("license"), used)) = "GPL";
+
 // Struct with the members declared in the wrong order. Accesses need
 // a successful CO-RE relocation against the type in relocs_read_tgt.c
 // for the test below to pass.
@@ -10,6 +13,21 @@ struct s {
 	char a;
 };
 
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long u64;
+
+// Struct with bitfields.
+struct bits {
+	int x;
+	u8 a:4, b:2;
+	u16 c:1;
+	unsigned int d:2;
+	enum { ZERO = 0, ONE = 1 } e:1;
+	u64 f:16, g:30;
+};
+
 // Perform a read from a subprog to ensure CO-RE relocations
 // occurring there are tracked and executed in the final linked program.
 __attribute__((noinline)) int read_subprog() {
@@ -21,8 +39,57 @@ __attribute__((noinline)) int read_subprog() {
 	if (core_access(foo.a) == 0)
 		return __LINE__;
 
-	if (core_access(foo.b) == 1)
-		return __LINE__;
+	if (core_access(foo.b) == 1)
+		return __LINE__;
+
+	struct bits bar;
+	char *p = (char *)&bar;
+	/* Target:
+	 * [4] STRUCT 'bits' size=8 vlen=7
+	 *   'b' type_id=5 bits_offset=0 bitfield_size=2
+	 *   'a' type_id=5 bits_offset=2 bitfield_size=4
+	 *   'd' type_id=7 bits_offset=6 bitfield_size=2
+	 *   'c' type_id=9 bits_offset=8 bitfield_size=1
+	 *   'e' type_id=11 bits_offset=9 bitfield_size=1
+	 *   'f' type_id=9 bits_offset=16
+	 *   'g' type_id=12 bits_offset=32 bitfield_size=30
+	 */
+	*p++ = 0xff; // a, b, d
+	*p++ = 0x00; // c, e
+	*p++ = 0x56; // f
+	*p++ = 0x56; // f
+#ifdef __BIG_ENDIAN__
+	*p++ = 0x55; // g
+	*p++ = 0x44; // g
+	*p++ = 0x33; // g
+	*p++ = 0x22; // g
+#else
+	*p++ = 0x22; // g
+	*p++ = 0x33; // g
+	*p++ = 0x44; // g
+	*p++ = 0x55; // g
+#endif
+
+	if (BPF_CORE_READ_BITFIELD(&bar, a) != (1<<4)-1)
+		return __LINE__;
+
+	if (BPF_CORE_READ_BITFIELD(&bar, b) != (1<<2)-1)
+		return __LINE__;
+
+	if (BPF_CORE_READ_BITFIELD(&bar, d) != (1<<2)-1)
+		return __LINE__;
+
+	if (BPF_CORE_READ_BITFIELD(&bar, c) != 0)
+		return __LINE__;
+
+	if (BPF_CORE_READ_BITFIELD(&bar, e) != 0)
+		return __LINE__;
+
+	if (BPF_CORE_READ_BITFIELD(&bar, f) != 0x5656)
+		return __LINE__;
+
+	if (BPF_CORE_READ_BITFIELD(&bar, g) != 0x15443322)
+		return __LINE__;
 
 	return 0;
 }
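
A quick cross-check of the magic constant in the final assertion, assuming the little-endian layout: in the target BTF, g has bits_offset=32 and bitfield_size=30, so it occupies the low 30 bits of the 4-byte word at byte offset 4, and the expected value is simply that word with its top two bits masked off:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// The four bytes the test writes for g, read as a little-endian word.
	word := binary.LittleEndian.Uint32([]byte{0x22, 0x33, 0x44, 0x55})
	fmt.Printf("%#x\n", word & (1<<30 - 1)) // 0x15443322
}
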
diff --git a/internal/btf/testdata/relocs_read_tgt-eb.elf b/internal/btf/testdata/relocs_read_tgt-eb.elf
index b0effd5f8..c09e1d0a5 100644
Binary files a/internal/btf/testdata/relocs_read_tgt-eb.elf and b/internal/btf/testdata/relocs_read_tgt-eb.elf differ
diff --git a/internal/btf/testdata/relocs_read_tgt-el.elf b/internal/btf/testdata/relocs_read_tgt-el.elf
index 34c056daa..4b1b33bcb 100644
Binary files a/internal/btf/testdata/relocs_read_tgt-el.elf and b/internal/btf/testdata/relocs_read_tgt-el.elf differ
diff --git a/internal/btf/testdata/relocs_read_tgt.c b/internal/btf/testdata/relocs_read_tgt.c
index 38bb4b156..5a298a8fc 100644
--- a/internal/btf/testdata/relocs_read_tgt.c
+++ b/internal/btf/testdata/relocs_read_tgt.c
@@ -11,6 +11,23 @@ struct s {
 	char b;
 };
 
+typedef unsigned int my_u32;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long u64;
+
+struct bits {
+	/*int x;*/
+	u8 b:2, a:4; /* a was before b */
+	my_u32 d:2; /* was 'unsigned int' */
+	u16 c:1; /* was before d */
+	enum { ZERO = 0, ONE = 1 } e:1;
+	u16 f; /* was: u64 f:16 */
+	u32 g:30; /* was: u64 g:30 */
+};
+
 int dummy() {
-	return core_access((struct s){}.a);
+	return core_access((struct s){}.a) +
+	       core_access((struct bits){}.a);
 }
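
The change from u64 f:16 to a plain u16 f above exercises the "emulate a bitfield" branch of coreFindField: the target field is treated as a 16-bit bitfield spanning its entire 2-byte load, so the computed shifts cancel out and the read degenerates into a plain load. A sketch under the same little-endian assumptions:

package main

import "fmt"

func main() {
	// The emulated target bitfield for f: offset=16, bitfieldOffset=16,
	// bitfieldSize=16, i.e. the whole 2-byte load.
	const offset, bitfieldOffset, bitfieldSize = 16, 16, 16

	lshift := 64 - (bitfieldOffset + bitfieldSize - offset) // 48
	rshift := 64 - bitfieldSize                             // 48

	var word uint64 = 0x5656 // the 2-byte load, zero-extended
	fmt.Printf("%#x\n", (word << lshift) >> rshift) // 0x5656, as relocs_read.c expects
}
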