btf: Implement support for bitfield relocations
This adds support for relocating bitfields, i.e. rewriting the
load offset and the bit shifts used to access the field.

The implementation mostly follows libbpf, but does not include
the loop for doubling the load size, as that should not be
necessary when one assumes that loads are aligned and that a
bitfield cannot be larger than its variable type.
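
A minimal sketch of the resulting arithmetic, assuming a little-endian
target (the helper below is illustrative and not part of this change):

package main

import "fmt"

// bitfieldShifts returns, for a bitfield described by its offset and size in
// bits and the byte size of its containing integer type, the aligned load
// offset in bytes plus the left and right shifts needed to extract the field
// from a 64-bit register.
func bitfieldShifts(bitOffset, bitSize, typeSize uint32) (loadOffset, lshift, rshift uint32) {
	// Align the load down to a multiple of the type size; this relies on the
	// assumptions above: loads are aligned and the bitfield fits in one load.
	loadOffset = bitOffset / 8 / typeSize * typeSize
	// Shift the bitfield's most significant bit up to bit 63 ...
	lshift = 64 - (bitOffset + bitSize - loadOffset*8)
	// ... then shift back down so that only bitSize bits remain.
	rshift = 64 - bitSize
	return loadOffset, lshift, rshift
}

func main() {
	// A 3-bit field at bit offset 37 of an 8-byte word:
	// load at byte 0, shift left by 24, shift right by 61.
	fmt.Println(bitfieldShifts(37, 3, 8))
}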

Signed-off-by: Jussi Maki <jussi@isovalent.com>
joamaki committed Mar 15, 2022
1 parent bf256fd commit 16c2fb0
Showing 11 changed files with 289 additions and 66 deletions.
3 changes: 3 additions & 0 deletions elf_reader_test.go
@@ -739,6 +739,9 @@ func TestLibBPFCompat(t *testing.T) {
case "btf__core_reloc_type_id___missing_targets.o",
"btf__core_reloc_flavors__err_wrong_name.o":
valid = false
case "btf__core_reloc_ints___err_bitfield.o":
// Bitfields are now valid.
valid = true
default:
valid = !strings.Contains(name, "___err_")
}
170 changes: 137 additions & 33 deletions internal/btf/core.go
@@ -1,6 +1,7 @@
package btf

import (
"encoding/binary"
"errors"
"fmt"
"math"
@@ -17,7 +18,7 @@ import (

// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
Kind COREKind
Kind FixupKind
Local uint32
Target uint32
Poison bool
@@ -41,8 +42,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {

switch class := ins.OpCode.Class(); class {
case asm.LdXClass, asm.StClass, asm.StXClass:
if want := int16(f.Local); want != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
if want := f.Local; !f.Kind.bitfield && int16(want) != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.Local)
}

if f.Target > math.MaxInt16 {
@@ -56,8 +57,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
return fmt.Errorf("not a dword-sized immediate load")
}

if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
if want := f.Local; !f.Kind.bitfield && int64(want) != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
}

ins.Constant = int64(f.Target)
@@ -74,8 +75,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
return fmt.Errorf("invalid source %s", src)
}

if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
if want := f.Local; !f.Kind.bitfield && int64(want) != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.Kind, ins)
}

if f.Target > math.MaxInt32 {
@@ -92,7 +93,7 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
}

func (f COREFixup) isNonExistant() bool {
return f.Kind.checksForExistence() && f.Target == 0
return f.Kind.coreKind.checksForExistence() && f.Target == 0
}

type COREFixups map[uint64]COREFixup
@@ -138,11 +139,11 @@ func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
return cpy, nil
}

// COREKind is the type of CO-RE relocation
type COREKind uint32
// coreKind is the type of CO-RE relocation as specified in BPF source code.
type coreKind uint32

const (
reloFieldByteOffset COREKind = iota /* field byte offset */
reloFieldByteOffset coreKind = iota /* field byte offset */
reloFieldByteSize /* field size in bytes */
reloFieldExists /* field existence in target kernel */
reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
@@ -156,7 +157,11 @@ const (
reloEnumvalValue /* enum value integer value */
)

func (k COREKind) String() string {
func (k coreKind) checksForExistence() bool {
return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}

func (k coreKind) String() string {
switch k {
case reloFieldByteOffset:
return "byte_off"
@@ -187,8 +192,20 @@ func (k COREKind) String() string {
}
}

func (k COREKind) checksForExistence() bool {
return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
// FixupKind is the type of CO-RE relocation.
type FixupKind struct {
coreKind coreKind

// bitfield is true if the relocation is for a bitfield. This disables some validation.
bitfield bool
}

func (fk FixupKind) String() string {
ck := fk.coreKind.String()
if fk.bitfield {
return ck + " (bitfield)"
}
return ck
}

func coreRelocate(local, target *Spec, relos CORERelos) (COREFixups, error) {
@@ -208,7 +225,7 @@ func coreRelocate(local, target *Spec, relos CORERelos) (COREFixups, error) {
}

result[uint64(relo.insnOff)] = COREFixup{
relo.kind,
FixupKind{coreKind: relo.kind},
uint32(relo.typeID),
uint32(relo.typeID),
false,
@@ -241,7 +258,7 @@ func coreRelocate(local, target *Spec, relos CORERelos) (COREFixups, error) {

relos := relosByID[id]
targets := target.namedTypes[newEssentialName(localTypeName)]
fixups, err := coreCalculateFixups(localType, targets, relos)
fixups, err := coreCalculateFixups(local.byteOrder, localType, targets, relos)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
@@ -262,7 +279,7 @@ var errImpossibleRelocation = errors.New("impossible relocation")
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFixup, error) {
func coreCalculateFixups(byteOrder binary.ByteOrder, local Type, targets []Type, relos CORERelos) ([]COREFixup, error) {
localID := local.ID()
local, err := copyType(local, skipQualifiersAndTypedefs)
if err != nil {
@@ -281,7 +298,7 @@ func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFix
score := 0 // lower is better
fixups := make([]COREFixup, 0, len(relos))
for _, relo := range relos {
fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
fixup, err := coreCalculateFixup(byteOrder, local, localID, target, targetID, relo)
if err != nil {
return nil, fmt.Errorf("target %s: %w", target, err)
}
@@ -317,7 +334,7 @@ func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFix
// targets at all. Poison everything!
bestFixups = make([]COREFixup, len(relos))
for i, relo := range relos {
bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
bestFixups[i] = COREFixup{Kind: FixupKind{coreKind: relo.kind}, Poison: true}
}
}

@@ -326,15 +343,15 @@ func coreCalculateFixups(local Type, targets []Type, relos CORERelos) ([]COREFix

// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo CORERelocation) (COREFixup, error) {
func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo CORERelocation) (COREFixup, error) {
fixup := func(local, target uint32) (COREFixup, error) {
return COREFixup{relo.kind, local, target, false}, nil
return COREFixup{FixupKind{coreKind: relo.kind}, local, target, false}, nil
}
poison := func() (COREFixup, error) {
if relo.kind.checksForExistence() {
return fixup(1, 0)
}
return COREFixup{relo.kind, 0, 0, true}, nil
return COREFixup{Kind: FixupKind{coreKind: relo.kind}, Local: 0, Target: 0, Poison: true}, nil
}
zero := COREFixup{}

@@ -390,7 +407,22 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
return fixup(uint32(localValue.Value), uint32(targetValue.Value))
}

case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
case reloFieldSigned:
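// Assume enums are signed; for plain integers compare the Signed bit of the
// local and target encodings, and treat any other type as unsigned.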
switch local.(type) {
case *Enum:
return fixup(1, 1)
case *Int:
return COREFixup{
FixupKind{coreKind: relo.kind},
uint32(local.(*Int).Encoding & Signed),
uint32(target.(*Int).Encoding & Signed),
false,
}, nil
default:
return fixup(0, 0)
}

case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
if _, ok := target.(*Fwd); ok {
// We can't relocate fields using a forward declaration, so
// skip it. If a non-forward declaration is present in the BTF
@@ -405,13 +437,20 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
if err != nil {
return zero, fmt.Errorf("target %s: %w", target, err)
}
fixupField := func(local, target uint32) (COREFixup, error) {
fixupKind := FixupKind{
coreKind: relo.kind,
bitfield: localField.bitfieldSize > 0 || targetField.bitfieldSize > 0,
}
return COREFixup{Kind: fixupKind, Local: local, Target: target, Poison: false}, nil
}

switch relo.kind {
case reloFieldExists:
return fixup(1, 1)

case reloFieldByteOffset:
return fixup(localField.offset/8, targetField.offset/8)
return fixupField(localField.offset/8, targetField.offset/8)

case reloFieldByteSize:
localSize, err := Sizeof(localField.Type)
@@ -423,9 +462,23 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
if err != nil {
return zero, err
}
return fixupField(uint32(localSize), uint32(targetSize))

case reloFieldLShiftU64:
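// The loaded word is shifted left so that the bitfield's most significant bit
// lands at bit 63 of a 64-bit register. For example, a 3-bit field at bit
// offset 37, loaded from byte 0 on little-endian: 64 - (37 + 3 - 0) = 24.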
var target uint32
if byteOrder == binary.LittleEndian {
target = 64 - (targetField.bitfieldOffset + targetField.bitfieldSize - targetField.offset)
} else {
targetSize, err := Sizeof(targetField.Type)
if err != nil {
return zero, err
}
target = (8-uint32(targetSize))*8 + (targetField.bitfieldOffset - targetField.offset)
}
return fixupField(0, target)

return fixup(uint32(localSize), uint32(targetSize))

case reloFieldRShiftU64:
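// After the left shift, shifting right by 64 - bitfieldSize leaves only the
// field's bits; an arithmetic right shift also sign-extends them.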
return fixupField(0, 64-targetField.bitfieldSize)
}
}

@@ -509,8 +562,18 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
}

type coreField struct {
Type Type
Type Type

// offset is the load offset of the field in bits.
offset uint32

// bitfieldOffset is optional and is the offset of the bitfield in bits.
// Used with bitfieldSize to compute the shifts required to pull the bitfield
// out from the word loaded from 'offset'.
bitfieldOffset uint32

// bitfieldSize is the size of the bitfield in bits.
bitfieldSize uint32
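
// For example, a 3-bit field at bit offset 37 read via an aligned 8-byte
// load has offset 0 (bits), bitfieldOffset 37 and bitfieldSize 3.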
}

func adjustOffset(base uint32, t Type, n int) (uint32, error) {
@@ -527,7 +590,7 @@ func adjustOffset(base uint32, t Type, n int) (uint32, error) {
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
func coreFindField(local Type, localAcc coreAccessor, target Type) (coreField, coreField, error) {
// The first index is used to offset a pointer of the base type like
// when accessing an array.
localOffset, err := adjustOffset(0, local, localAcc[0])
@@ -579,17 +642,58 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
return coreField{}, coreField{}, err
}

if targetMember.BitfieldSize > 0 {
return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
}

local = localMember.Type
localMaybeFlex = acc == len(localMembers)-1
localOffset += localMember.OffsetBits
target = targetMember.Type
targetMaybeFlex = last
targetOffset += targetMember.OffsetBits

if targetMember.BitfieldSize == 0 && localMember.BitfieldSize == 0 {
break
}

targetSize, err := Sizeof(target)
if err != nil {
return coreField{}, coreField{},
fmt.Errorf("could not get target size: %w", err)
}

targetBitfieldOffset := targetOffset
var targetBitfieldSize uint32

if targetMember.BitfieldSize > 0 {
// From the target BTF we know the size of the type containing the target bitfield
// and the bitfield's offset in bits. Since we know the load is aligned, we can
// compute the load offset by:
// 1) converting the bit offset to bytes with a flooring division, yielding a byte-aligned offset.
// 2) dividing and multiplying that offset by the load size, yielding an offset aligned to the load size.
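// For example, a bitfield at bit offset 37 in an 8-byte word:
// 37/8 = 4, 4/8*8 = 0, so the load offset becomes 8*0 = 0 bits (byte 0).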
targetBitfieldSize = targetMember.BitfieldSize
targetOffset = 8 * (targetOffset / 8 / uint32(targetSize) * uint32(targetSize))

// As a sanity check, verify that the bitfield is captured by the chosen load. This should
// only fail if one of the two assumptions is broken: that the bitfield is no larger than its
// variable type, and that loads are aligned.
if targetOffset > targetBitfieldOffset ||
targetOffset+uint32(targetSize)*8 < targetBitfieldOffset+targetMember.BitfieldSize {
return coreField{}, coreField{},
fmt.Errorf("could not find load for bitfield: load of %d bytes at %d does not capture bitfield of size %d at %d",
targetSize, targetOffset, targetMember.BitfieldSize, targetBitfieldOffset)
}
} else {
// Going from a bitfield to a normal field. Since the local BTF describes it as a
// bitfield, we need to "emulate" a bitfield in the target to compute the shifts correctly.
targetBitfieldSize = uint32(targetSize * 8)
}

if err := coreAreMembersCompatible(local, target); err != nil {
return coreField{}, coreField{}, err
}

return coreField{local, localOffset, localMember.OffsetBits, localMember.BitfieldSize},
coreField{target, targetOffset, targetBitfieldOffset, targetBitfieldSize},
nil

case *Array:
// For arrays, acc is the index in the target.
targetType, ok := target.(*Array)
@@ -634,7 +738,7 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
}
}

return coreField{local, localOffset}, coreField{target, targetOffset}, nil
return coreField{local, localOffset, 0, 0}, coreField{target, targetOffset, 0, 0}, nil
}

// coreFindMember finds a member in a composite type while handling anonymous
