btf: Implement support for bitfield relocations
Bitfields are implemented by computing a new load size and offset
for the target: start with a 1-byte load at the offset of the
target bitfield and grow the load size until it either captures
the bitfield or exceeds 8 bytes. This algorithm differs from the
one in libbpf, which starts from the original load size and grows
it to capture the field, and may thus overrun the end of the
struct if the bitfield has been moved close to it.
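
As an illustration, here is a standalone sketch of that sizing loop
(omitting the 8-byte error cap for brevity; the committed version is
calculateBitfieldLoad in internal/btf/core.go), worked through for a
hypothetical 7-bit field at bit offset 36:

package main

import "fmt"

// bitfieldLoad mirrors the loop described above: start with a 1-byte
// aligned load and double it until the window covers the bitfield.
func bitfieldLoad(bitOffset, bitSize int) (size, offset int) {
	size = 1
	offset = bitOffset / 8 / size * size
	for bitOffset+bitSize > (offset+size)*8 {
		size *= 2
		offset = bitOffset / 8 / size * size
	}
	return size, offset
}

func main() {
	// A 7-bit field at bit offset 36: a 1-byte load at byte 4 covers
	// bits [32,40) and misses bits 40-42, so the load doubles to
	// 2 bytes at byte 4, covering bits [32,48).
	fmt.Println(bitfieldLoad(36, 7)) // 2 4
}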

The left and right shifts for the target are then adjusted in the
same way as in libbpf.
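
For example, on little-endian with the hypothetical field above (7 bits
at bit offset 36, loaded as 2 bytes at byte offset 4, i.e. bit 32), the
formulas from core.go below give:

package main

import "fmt"

func main() {
	const bitOffset, bitSize, loadBit = 36, 7, 32

	lshift := 64 - (bitOffset + bitSize - loadBit) // 53: left-justify the field in a u64
	rshift := 64 - bitSize                         // 57: right-justify, leaving bitSize bits

	// Simulate the extraction on a load whose field bits are all ones.
	loaded := uint64(0x7F) << (bitOffset - loadBit)
	fmt.Println(lshift, rshift, loaded<<lshift>>rshift) // 53 57 127
}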

Since we don't know the load size chosen by the compiler, we cannot
compute "local" and cannot validate it when applying the relocation;
hence I refactored the code to make COREFixup.Local optional.
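
A minimal sketch of the resulting semantics, mirroring the
OptionalLocalValue type added below (a negative sentinel marks the
value unset, and apply() then skips validation instead of failing):

package main

import "fmt"

// Mirrors the type added in internal/btf/core.go below.
type OptionalLocalValue int64

const unsetLocalValue = OptionalLocalValue(-1)

func (v OptionalLocalValue) Get() (uint32, bool) { return uint32(v), v >= 0 }

func main() {
	if _, ok := unsetLocalValue.Get(); !ok {
		fmt.Println("unset: skip validating the instruction against Local")
	}
	if want, ok := OptionalLocalValue(42).Get(); ok {
		fmt.Println("set: validate the instruction against", want)
	}
}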

Signed-off-by: Jussi Maki <jussi@isovalent.com>
joamaki committed Feb 23, 2022
1 parent ba2072b commit d928ec2
Showing 7 changed files with 197 additions and 60 deletions.
3 changes: 3 additions & 0 deletions elf_reader_test.go
@@ -739,6 +739,9 @@ func TestLibBPFCompat(t *testing.T) {
case "btf__core_reloc_type_id___missing_targets.o",
"btf__core_reloc_flavors__err_wrong_name.o":
valid = false
case "btf__core_reloc_ints___err_bitfield.o":
// Bitfields are now valid.
valid = true
default:
valid = !strings.Contains(name, "___err_")
}
150 changes: 120 additions & 30 deletions internal/btf/core.go
@@ -1,6 +1,7 @@
package btf

import (
"encoding/binary"
"errors"
"fmt"
"math"
@@ -15,10 +16,18 @@ import (
// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.

type OptionalLocalValue int64

const unsetLocalValue = OptionalLocalValue(-1)

func (v OptionalLocalValue) Get() (uint32, bool) {
return uint32(v), v >= 0
}

// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
Kind COREKind
Local uint32
Local OptionalLocalValue
Target uint32
Poison bool
}
@@ -41,8 +50,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {

switch class := ins.OpCode.Class(); class {
case asm.LdXClass, asm.StClass, asm.StXClass:
if want := int16(f.Local); want != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
if want, ok := f.Local.Get(); ok && int16(want) != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.Local)
}

if f.Target > math.MaxInt16 {
@@ -56,8 +65,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
return fmt.Errorf("not a dword-sized immediate load")
}

if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
if want, ok := f.Local.Get(); ok && int64(want) != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
}

ins.Constant = int64(f.Target)
@@ -74,8 +83,8 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
return fmt.Errorf("invalid source %s", src)
}

if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
if want, ok := f.Local.Get(); ok && int64(want) != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.Kind, ins)
}

if f.Target > math.MaxInt32 {
@@ -208,10 +217,10 @@ func coreRelocate(local, target *Spec, relos CoreRelos) (COREFixups, error) {
}

result[uint64(relo.insnOff)] = COREFixup{
relo.kind,
uint32(relo.typeID),
uint32(relo.typeID),
false,
Kind: relo.kind,
Local: OptionalLocalValue(relo.typeID),
Target: uint32(relo.typeID),
Poison: false,
}
continue
}
@@ -241,7 +250,7 @@ func coreRelocate(local, target *Spec, relos CoreRelos) (COREFixups, error) {

relos := relosByID[id]
targets := target.namedTypes[newEssentialName(localTypeName)]
fixups, err := coreCalculateFixups(localType, targets, relos)
fixups, err := coreCalculateFixups(local.byteOrder, localType, targets, relos)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
Expand All @@ -262,7 +271,7 @@ var errImpossibleRelocation = errors.New("impossible relocation")
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFixup, error) {
func coreCalculateFixups(byteOrder binary.ByteOrder, local Type, targets []Type, relos CoreRelos) ([]COREFixup, error) {
localID := local.ID()
local, err := copyType(local, skipQualifiersAndTypedefs)
if err != nil {
@@ -281,7 +290,7 @@ func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFix
score := 0 // lower is better
fixups := make([]COREFixup, 0, len(relos))
for _, relo := range relos {
fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
fixup, err := coreCalculateFixup(byteOrder, local, localID, target, targetID, relo)
if err != nil {
return nil, fmt.Errorf("target %s: %w", target, err)
}
@@ -326,15 +335,15 @@ func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFix

// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo CoreRelo) (COREFixup, error) {
fixup := func(local, target uint32) (COREFixup, error) {
return COREFixup{relo.kind, local, target, false}, nil
func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo CoreRelo) (COREFixup, error) {
fixup := func(local uint32, target uint32) (COREFixup, error) {
return COREFixup{Kind: relo.kind, Local: OptionalLocalValue(local), Target: target, Poison: false}, nil
}
poison := func() (COREFixup, error) {
if relo.kind.checksForExistence() {
return fixup(1, 0)
}
return COREFixup{relo.kind, 0, 0, true}, nil
return COREFixup{Kind: relo.kind, Local: 0, Target: 0, Poison: true}, nil
}
zero := COREFixup{}

@@ -390,7 +399,23 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
return fixup(uint32(localValue.Value), uint32(targetValue.Value))
}

case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
case reloFieldSigned:
switch local.(type) {
case *Enum:
return fixup(1, 1)
case *Int:
return COREFixup{Kind: relo.kind,
Local: unsetLocalValue,
Target: uint32(target.(*Int).Encoding & Signed),
Poison: false}, nil
default:
return COREFixup{Kind: relo.kind,
Local: unsetLocalValue,
Target: 0,
Poison: false}, nil
}

case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
if _, ok := target.(*Fwd); ok {
// We can't relocate fields using a forward declaration, so
// skip it. If a non-forward declaration is present in the BTF
@@ -405,13 +430,21 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
if err != nil {
return zero, fmt.Errorf("target %s: %w", target, err)
}
fixupField := func(local, target uint32) (COREFixup, error) {
if localField.bitfieldSize > 0 || targetField.bitfieldSize > 0 {
// Do not validate 'local' for bitfields as we cannot know how wide of a load the compiler
// decided to use.
return COREFixup{Kind: relo.kind, Local: unsetLocalValue, Target: target, Poison: false}, nil
}
return COREFixup{Kind: relo.kind, Local: OptionalLocalValue(local), Target: target, Poison: false}, nil
}

switch relo.kind {
case reloFieldExists:
return fixup(1, 1)
return fixupField(1, 1)

case reloFieldByteOffset:
return fixup(localField.offset/8, targetField.offset/8)
return fixupField(localField.offset/8, targetField.offset/8)

case reloFieldByteSize:
localSize, err := Sizeof(localField.Type)
@@ -423,9 +456,23 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
if err != nil {
return zero, err
}
return fixupField(uint32(localSize), uint32(targetSize))

return fixup(uint32(localSize), uint32(targetSize))
case reloFieldLShiftU64:
targetSize, err := Sizeof(targetField.Type)
if err != nil {
return zero, err
}
var target uint32
if byteOrder == binary.LittleEndian {
target = 64 - (targetField.bitfieldOffset + targetField.bitfieldSize - targetField.offset)
} else {
target = (8-uint32(targetSize))*8 + (targetField.bitfieldOffset - targetField.offset)
}
return COREFixup{Kind: relo.kind, Local: unsetLocalValue, Target: target, Poison: false}, nil

case reloFieldRShiftU64:
return COREFixup{Kind: relo.kind, Local: unsetLocalValue, Target: 64 - targetField.bitfieldSize, Poison: false}, nil
}
}

@@ -509,8 +556,10 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
}

type coreField struct {
Type Type
offset uint32
Type Type
offset uint32
bitfieldOffset uint32
bitfieldSize uint32
}

func adjustOffset(base uint32, t Type, n int) (uint32, error) {
@@ -522,12 +571,31 @@ func adjustOffset(base uint32, t Type, n int) (uint32, error) {
return base + (uint32(n) * uint32(size) * 8), nil
}

// calculateBitfieldLoad computes the size and offset for loading a bitfield from
// the target structure.
func calculateBitfieldLoad(bitOffset, bitSize int) (size int, offset int, err error) {
// Start with the smallest possible aligned load before the bit offset.
size = 1
offset = bitOffset / 8 / size * size

// Iterate until the load is large enough to capture the target bitfield.
for bitOffset+bitSize > (offset+size)*8 {
if size >= 8 {
return -1, -1, fmt.Errorf("could not calculate bitfield load: load size too large (%dB)", size)
}
// Double the size of the load and recompute the offset.
size *= 2
offset = bitOffset / 8 / size * size
}
return size, offset, nil
}

// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
func coreFindField(local Type, localAcc coreAccessor, target Type) (coreField, coreField, error) {
// The first index is used to offset a pointer of the base type like
// when accessing an array.
localOffset, err := adjustOffset(0, local, localAcc[0])
@@ -579,17 +647,39 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
return coreField{}, coreField{}, err
}

if targetMember.BitfieldSize > 0 {
return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
}

local = localMember.Type
localMaybeFlex = acc == len(localMembers)-1
localOffset += localMember.OffsetBits
target = targetMember.Type
targetMaybeFlex = last
targetOffset += targetMember.OffsetBits

if targetMember.BitfieldSize > 0 {
// For bitfields we compute the offset from which to load
// the value, and we include the offset of the bitfield and its
// size in 'coreField' so we can later compute the proper bit shift.
size, offset, err := calculateBitfieldLoad(int(targetOffset), int(targetMember.BitfieldSize))
if err != nil {
return coreField{}, coreField{}, err
}

// Adjust the load instruction, if needed.
if targetLoad, ok := target.(*Int); ok && targetLoad.Size != uint32(size) {
target = target.copy()
target.(*Int).Size = uint32(size)
}

return coreField{local, localOffset, localMember.OffsetBits, localMember.BitfieldSize},
coreField{target, uint32(offset * 8), targetMember.OffsetBits, targetMember.BitfieldSize},
nil
} else if localMember.BitfieldSize > 0 {
// Going from a bitfield to a normal field. Special-cased here as we cannot
// validate 'local' e.g. for byte_sz.
return coreField{local, localOffset, localMember.OffsetBits, localMember.BitfieldSize},
coreField{target, targetOffset, 0, 0},
nil
}

case *Array:
// For arrays, acc is the index in the target.
targetType, ok := target.(*Array)
@@ -634,7 +724,7 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
}
}

return coreField{local, localOffset}, coreField{target, targetOffset}, nil
return coreField{local, localOffset, 0, 0}, coreField{target, targetOffset, 0, 0}, nil
}

// coreFindMember finds a member in a composite type while handling anonymous
4 changes: 4 additions & 0 deletions internal/btf/core_reloc_test.go
@@ -33,6 +33,10 @@ func TestCoreRelocationLoad(t *testing.T) {
TargetBTF: fh,
})

if strings.Contains(progSpec.SectionName, "/k54_") {
testutils.SkipOnOldKernel(t, "5.4", "5.4 kernel or newer")
}

if strings.HasPrefix(progSpec.Name, "err_") {
if err == nil {
prog.Close()