diff --git a/src/runtime/go.mod b/src/runtime/go.mod index 1c4d6df76117..a99b43be6fd3 100644 --- a/src/runtime/go.mod +++ b/src/runtime/go.mod @@ -59,7 +59,7 @@ require ( go.opentelemetry.io/otel/sdk v1.36.0 go.opentelemetry.io/otel/trace v1.36.0 golang.org/x/oauth2 v0.30.0 - golang.org/x/sys v0.34.0 + golang.org/x/sys v0.35.0 google.golang.org/grpc v1.74.2 google.golang.org/protobuf v1.36.8 k8s.io/apimachinery v0.30.0 @@ -74,7 +74,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cilium/ebpf v0.16.0 // indirect + github.com/cilium/ebpf v0.19.0 // indirect github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/containerd/continuity v0.4.5 // indirect github.com/containerd/errdefs v0.3.0 // indirect @@ -127,11 +127,11 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect go.opentelemetry.io/otel/metric v1.36.0 // indirect - golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect golang.org/x/mod v0.27.0 // indirect - golang.org/x/net v0.42.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/sync v0.16.0 // indirect - golang.org/x/text v0.27.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/tools v0.36.0 // indirect google.golang.org/genproto v0.0.0-20250826171959-ef028d996bc1 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/src/runtime/go.sum b/src/runtime/go.sum index ebd64be475ee..5de267ef9e93 100644 --- a/src/runtime/go.sum +++ b/src/runtime/go.sum @@ -27,8 +27,8 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= -github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= +github.com/cilium/ebpf v0.19.0 h1:Ro/rE64RmFBeA9FGjcTc+KmCeY6jXmryu6FfnzPRIao= +github.com/cilium/ebpf v0.19.0/go.mod h1:fLCgMo3l8tZmAdM3B2XqdFzXBpwkcSTroaVqN08OWVY= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/container-orchestrated-devices/container-device-interface v0.6.0 h1:aWwcz/Ep0Fd7ZuBjQGjU/jdPloM7ydhMW13h85jZNvk= @@ -133,8 +133,8 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI= github.com/go-openapi/validate v0.22.3/go.mod 
h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= -github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= -github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s= +github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -387,8 +387,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0= -golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -412,8 +410,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= -golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= @@ -450,8 +448,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -460,8 +458,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/src/runtime/vendor/github.com/cilium/ebpf/.gitattributes b/src/runtime/vendor/github.com/cilium/ebpf/.gitattributes index 113f97b9804d..ea7c9a89c324 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/.gitattributes +++ b/src/runtime/vendor/github.com/cilium/ebpf/.gitattributes @@ -1 +1,4 @@ +# Force line ending normalisation +* text=auto +# Show types.go in the PR diff view by default internal/sys/types.go linguist-generated=false diff --git a/src/runtime/vendor/github.com/cilium/ebpf/.golangci.yaml b/src/runtime/vendor/github.com/cilium/ebpf/.golangci.yaml index 65f91b910bf6..8f88708b2cbe 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/.golangci.yaml +++ b/src/runtime/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -1,13 +1,29 @@ ---- +version: "2" linters: - disable-all: true + default: none enable: - - goimports - - gosimple + - depguard - govet - ineffassign - misspell - - staticcheck - - typecheck - unused + settings: + depguard: + rules: + no-x-sys-unix: + files: + - '!**/internal/unix/*.go' + - '!**/examples/**/*.go' + - '!**/docs/**/*.go' + deny: + - pkg: golang.org/x/sys/unix + desc: use internal/unix instead + +formatters: + enable: - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/cilium/ebpf diff --git a/src/runtime/vendor/github.com/cilium/ebpf/CODEOWNERS b/src/runtime/vendor/github.com/cilium/ebpf/CODEOWNERS index ca65d23c09d2..bd0a61158e60 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/CODEOWNERS +++ b/src/runtime/vendor/github.com/cilium/ebpf/CODEOWNERS @@ -8,4 +8,4 @@ ringbuf/ @florianl btf/ @dylandreimerink -cmd/bpf2go/ @mejedi +docs/ @ti-mo diff --git a/src/runtime/vendor/github.com/cilium/ebpf/Makefile b/src/runtime/vendor/github.com/cilium/ebpf/Makefile index d355eea71cae..45462e8d5a89 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/Makefile +++ b/src/runtime/vendor/github.com/cilium/ebpf/Makefile @@ -1,10 +1,10 @@ # The development version of clang is 
distributed as the 'clang' binary, # while stable/released versions have a version number attached. # Pin the default clang to a stable version. -CLANG ?= clang-17 -STRIP ?= llvm-strip-17 -OBJCOPY ?= llvm-objcopy-17 -CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) +CLANG ?= clang-20 +STRIP ?= llvm-strip-20 +OBJCOPY ?= llvm-objcopy-20 +CFLAGS := -O2 -g -Wall -Werror -mcpu=v2 $(CFLAGS) CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ @@ -16,14 +16,17 @@ UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) # Prefer podman if installed, otherwise use docker. # Note: Setting the var at runtime will always override. CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) -CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}") +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), \ + --log-driver=none \ + -v "$(shell go env GOCACHE)":/root/.cache/go-build \ + -v "$(shell go env GOMODCACHE)":/go/pkg/mod, --user "${UIDGID}") IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) TARGETS := \ - testdata/loader-clang-11 \ testdata/loader-clang-14 \ + testdata/loader-clang-17 \ testdata/loader-$(CLANG) \ testdata/manyprogs \ testdata/btf_map_init \ @@ -39,16 +42,18 @@ TARGETS := \ testdata/subprog_reloc \ testdata/fwd_decl \ testdata/kconfig \ - testdata/kconfig_config \ + testdata/ksym \ testdata/kfunc \ testdata/invalid-kfunc \ testdata/kfunc-kmod \ testdata/constants \ testdata/errors \ + testdata/variables \ btf/testdata/relocs \ btf/testdata/relocs_read \ btf/testdata/relocs_read_tgt \ btf/testdata/relocs_enum \ + btf/testdata/tags \ cmd/bpf2go/testdata/minimal .PHONY: all clean container-all container-shell generate @@ -57,21 +62,21 @@ TARGETS := \ # Build all ELF binaries using a containerized LLVM toolchain. container-all: - +${CONTAINER_ENGINE} run --rm -t ${CONTAINER_RUN_ARGS} \ + +${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ --env HOME="/tmp" \ --env BPF2GO_CC="$(CLANG)" \ - --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + --env BPF2GO_CFLAGS="$(CFLAGS)" \ "${IMAGE}:${VERSION}" \ make all # (debug) Drop the user into a shell inside the container as root. # Set BPF2GO_ envs to make 'make generate' just work. container-shell: - ${CONTAINER_ENGINE} run --rm -ti \ + ${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ -v "${REPODIR}":/ebpf -w /ebpf \ --env BPF2GO_CC="$(CLANG)" \ - --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + --env BPF2GO_CFLAGS="$(CFLAGS)" \ "${IMAGE}:${VERSION}" clean: diff --git a/src/runtime/vendor/github.com/cilium/ebpf/README.md b/src/runtime/vendor/github.com/cilium/ebpf/README.md index 85871db1ae39..d471ae769379 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/README.md +++ b/src/runtime/vendor/github.com/cilium/ebpf/README.md @@ -53,13 +53,16 @@ This library includes the following packages: * [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. * [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format. +* [pin](https://pkg.go.dev/github.com/cilium/ebpf/pin) provides APIs for working with pinned objects on bpffs. 
## Requirements * A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy) -* CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed versions - are not supported. +* Linux: CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed + versions are not supported. +* Windows: CI is run against Windows Server 2022. Only the latest eBPF for Windows + release is supported. ## License diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/func.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/func.go index 84a40b2277f0..5ee4e954f581 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/asm/func.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/func.go @@ -1,245 +1,18 @@ package asm +import "github.com/cilium/ebpf/internal/platform" + //go:generate go run golang.org/x/tools/cmd/stringer@latest -output func_string.go -type=BuiltinFunc // BuiltinFunc is a built-in eBPF function. -type BuiltinFunc int32 - -func (_ BuiltinFunc) Max() BuiltinFunc { - return maxBuiltinFunc - 1 -} +type BuiltinFunc uint32 -// eBPF built-in functions -// -// You can regenerate this list using the following gawk script: +// BuiltinFuncForPlatform returns a platform specific function constant. // -// /FN\(.+\),/ { -// match($1, /\(([a-z_0-9]+),/, r) -// split(r[1], p, "_") -// printf "Fn" -// for (i in p) { -// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2) -// } -// print "" -// } -// -// The script expects include/uapi/linux/bpf.h as it's input. -const ( - FnUnspec BuiltinFunc = iota - FnMapLookupElem - FnMapUpdateElem - FnMapDeleteElem - FnProbeRead - FnKtimeGetNs - FnTracePrintk - FnGetPrandomU32 - FnGetSmpProcessorId - FnSkbStoreBytes - FnL3CsumReplace - FnL4CsumReplace - FnTailCall - FnCloneRedirect - FnGetCurrentPidTgid - FnGetCurrentUidGid - FnGetCurrentComm - FnGetCgroupClassid - FnSkbVlanPush - FnSkbVlanPop - FnSkbGetTunnelKey - FnSkbSetTunnelKey - FnPerfEventRead - FnRedirect - FnGetRouteRealm - FnPerfEventOutput - FnSkbLoadBytes - FnGetStackid - FnCsumDiff - FnSkbGetTunnelOpt - FnSkbSetTunnelOpt - FnSkbChangeProto - FnSkbChangeType - FnSkbUnderCgroup - FnGetHashRecalc - FnGetCurrentTask - FnProbeWriteUser - FnCurrentTaskUnderCgroup - FnSkbChangeTail - FnSkbPullData - FnCsumUpdate - FnSetHashInvalid - FnGetNumaNodeId - FnSkbChangeHead - FnXdpAdjustHead - FnProbeReadStr - FnGetSocketCookie - FnGetSocketUid - FnSetHash - FnSetsockopt - FnSkbAdjustRoom - FnRedirectMap - FnSkRedirectMap - FnSockMapUpdate - FnXdpAdjustMeta - FnPerfEventReadValue - FnPerfProgReadValue - FnGetsockopt - FnOverrideReturn - FnSockOpsCbFlagsSet - FnMsgRedirectMap - FnMsgApplyBytes - FnMsgCorkBytes - FnMsgPullData - FnBind - FnXdpAdjustTail - FnSkbGetXfrmState - FnGetStack - FnSkbLoadBytesRelative - FnFibLookup - FnSockHashUpdate - FnMsgRedirectHash - FnSkRedirectHash - FnLwtPushEncap - FnLwtSeg6StoreBytes - FnLwtSeg6AdjustSrh - FnLwtSeg6Action - FnRcRepeat - FnRcKeydown - FnSkbCgroupId - FnGetCurrentCgroupId - FnGetLocalStorage - FnSkSelectReuseport - FnSkbAncestorCgroupId - FnSkLookupTcp - FnSkLookupUdp - FnSkRelease - FnMapPushElem - FnMapPopElem - FnMapPeekElem - FnMsgPushData - FnMsgPopData - FnRcPointerRel - FnSpinLock - FnSpinUnlock - FnSkFullsock - FnTcpSock - FnSkbEcnSetCe - FnGetListenerSock - FnSkcLookupTcp - FnTcpCheckSyncookie - FnSysctlGetName - FnSysctlGetCurrentValue - FnSysctlGetNewValue - FnSysctlSetNewValue - FnStrtol - FnStrtoul - FnSkStorageGet - 
FnSkStorageDelete - FnSendSignal - FnTcpGenSyncookie - FnSkbOutput - FnProbeReadUser - FnProbeReadKernel - FnProbeReadUserStr - FnProbeReadKernelStr - FnTcpSendAck - FnSendSignalThread - FnJiffies64 - FnReadBranchRecords - FnGetNsCurrentPidTgid - FnXdpOutput - FnGetNetnsCookie - FnGetCurrentAncestorCgroupId - FnSkAssign - FnKtimeGetBootNs - FnSeqPrintf - FnSeqWrite - FnSkCgroupId - FnSkAncestorCgroupId - FnRingbufOutput - FnRingbufReserve - FnRingbufSubmit - FnRingbufDiscard - FnRingbufQuery - FnCsumLevel - FnSkcToTcp6Sock - FnSkcToTcpSock - FnSkcToTcpTimewaitSock - FnSkcToTcpRequestSock - FnSkcToUdp6Sock - FnGetTaskStack - FnLoadHdrOpt - FnStoreHdrOpt - FnReserveHdrOpt - FnInodeStorageGet - FnInodeStorageDelete - FnDPath - FnCopyFromUser - FnSnprintfBtf - FnSeqPrintfBtf - FnSkbCgroupClassid - FnRedirectNeigh - FnPerCpuPtr - FnThisCpuPtr - FnRedirectPeer - FnTaskStorageGet - FnTaskStorageDelete - FnGetCurrentTaskBtf - FnBprmOptsSet - FnKtimeGetCoarseNs - FnImaInodeHash - FnSockFromFile - FnCheckMtu - FnForEachMapElem - FnSnprintf - FnSysBpf - FnBtfFindByNameKind - FnSysClose - FnTimerInit - FnTimerSetCallback - FnTimerStart - FnTimerCancel - FnGetFuncIp - FnGetAttachCookie - FnTaskPtRegs - FnGetBranchSnapshot - FnTraceVprintk - FnSkcToUnixSock - FnKallsymsLookupName - FnFindVma - FnLoop - FnStrncmp - FnGetFuncArg - FnGetFuncRet - FnGetFuncArgCnt - FnGetRetval - FnSetRetval - FnXdpGetBuffLen - FnXdpLoadBytes - FnXdpStoreBytes - FnCopyFromUserTask - FnSkbSetTstamp - FnImaFileHash - FnKptrXchg - FnMapLookupPercpuElem - FnSkcToMptcpSock - FnDynptrFromMem - FnRingbufReserveDynptr - FnRingbufSubmitDynptr - FnRingbufDiscardDynptr - FnDynptrRead - FnDynptrWrite - FnDynptrData - FnTcpRawGenSyncookieIpv4 - FnTcpRawGenSyncookieIpv6 - FnTcpRawCheckSyncookieIpv4 - FnTcpRawCheckSyncookieIpv6 - FnKtimeGetTaiNs - FnUserRingbufDrain - FnCgrpStorageGet - FnCgrpStorageDelete - - maxBuiltinFunc -) +// Use this if the library doesn't provide a constant yet. +func BuiltinFuncForPlatform(plat string, value uint32) (BuiltinFunc, error) { + return platform.EncodeConstant[BuiltinFunc](plat, value) +} // Call emits a function call. func (fn BuiltinFunc) Call() Instruction { diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/func_lin.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/func_lin.go new file mode 100644 index 000000000000..1dd026d62de1 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/func_lin.go @@ -0,0 +1,223 @@ +// Code generated by internal/cmd/genfunctions.awk; DO NOT EDIT. + +package asm + +// Code in this file is derived from Linux, available under the GPL-2.0 WITH Linux-syscall-note. + +import "github.com/cilium/ebpf/internal/platform" + +// Built-in functions (Linux). 
+const ( + FnUnspec = BuiltinFunc(platform.LinuxTag | 0) //lint:ignore SA4016 consistency + FnMapLookupElem = BuiltinFunc(platform.LinuxTag | 1) + FnMapUpdateElem = BuiltinFunc(platform.LinuxTag | 2) + FnMapDeleteElem = BuiltinFunc(platform.LinuxTag | 3) + FnProbeRead = BuiltinFunc(platform.LinuxTag | 4) + FnKtimeGetNs = BuiltinFunc(platform.LinuxTag | 5) + FnTracePrintk = BuiltinFunc(platform.LinuxTag | 6) + FnGetPrandomU32 = BuiltinFunc(platform.LinuxTag | 7) + FnGetSmpProcessorId = BuiltinFunc(platform.LinuxTag | 8) + FnSkbStoreBytes = BuiltinFunc(platform.LinuxTag | 9) + FnL3CsumReplace = BuiltinFunc(platform.LinuxTag | 10) + FnL4CsumReplace = BuiltinFunc(platform.LinuxTag | 11) + FnTailCall = BuiltinFunc(platform.LinuxTag | 12) + FnCloneRedirect = BuiltinFunc(platform.LinuxTag | 13) + FnGetCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 14) + FnGetCurrentUidGid = BuiltinFunc(platform.LinuxTag | 15) + FnGetCurrentComm = BuiltinFunc(platform.LinuxTag | 16) + FnGetCgroupClassid = BuiltinFunc(platform.LinuxTag | 17) + FnSkbVlanPush = BuiltinFunc(platform.LinuxTag | 18) + FnSkbVlanPop = BuiltinFunc(platform.LinuxTag | 19) + FnSkbGetTunnelKey = BuiltinFunc(platform.LinuxTag | 20) + FnSkbSetTunnelKey = BuiltinFunc(platform.LinuxTag | 21) + FnPerfEventRead = BuiltinFunc(platform.LinuxTag | 22) + FnRedirect = BuiltinFunc(platform.LinuxTag | 23) + FnGetRouteRealm = BuiltinFunc(platform.LinuxTag | 24) + FnPerfEventOutput = BuiltinFunc(platform.LinuxTag | 25) + FnSkbLoadBytes = BuiltinFunc(platform.LinuxTag | 26) + FnGetStackid = BuiltinFunc(platform.LinuxTag | 27) + FnCsumDiff = BuiltinFunc(platform.LinuxTag | 28) + FnSkbGetTunnelOpt = BuiltinFunc(platform.LinuxTag | 29) + FnSkbSetTunnelOpt = BuiltinFunc(platform.LinuxTag | 30) + FnSkbChangeProto = BuiltinFunc(platform.LinuxTag | 31) + FnSkbChangeType = BuiltinFunc(platform.LinuxTag | 32) + FnSkbUnderCgroup = BuiltinFunc(platform.LinuxTag | 33) + FnGetHashRecalc = BuiltinFunc(platform.LinuxTag | 34) + FnGetCurrentTask = BuiltinFunc(platform.LinuxTag | 35) + FnProbeWriteUser = BuiltinFunc(platform.LinuxTag | 36) + FnCurrentTaskUnderCgroup = BuiltinFunc(platform.LinuxTag | 37) + FnSkbChangeTail = BuiltinFunc(platform.LinuxTag | 38) + FnSkbPullData = BuiltinFunc(platform.LinuxTag | 39) + FnCsumUpdate = BuiltinFunc(platform.LinuxTag | 40) + FnSetHashInvalid = BuiltinFunc(platform.LinuxTag | 41) + FnGetNumaNodeId = BuiltinFunc(platform.LinuxTag | 42) + FnSkbChangeHead = BuiltinFunc(platform.LinuxTag | 43) + FnXdpAdjustHead = BuiltinFunc(platform.LinuxTag | 44) + FnProbeReadStr = BuiltinFunc(platform.LinuxTag | 45) + FnGetSocketCookie = BuiltinFunc(platform.LinuxTag | 46) + FnGetSocketUid = BuiltinFunc(platform.LinuxTag | 47) + FnSetHash = BuiltinFunc(platform.LinuxTag | 48) + FnSetsockopt = BuiltinFunc(platform.LinuxTag | 49) + FnSkbAdjustRoom = BuiltinFunc(platform.LinuxTag | 50) + FnRedirectMap = BuiltinFunc(platform.LinuxTag | 51) + FnSkRedirectMap = BuiltinFunc(platform.LinuxTag | 52) + FnSockMapUpdate = BuiltinFunc(platform.LinuxTag | 53) + FnXdpAdjustMeta = BuiltinFunc(platform.LinuxTag | 54) + FnPerfEventReadValue = BuiltinFunc(platform.LinuxTag | 55) + FnPerfProgReadValue = BuiltinFunc(platform.LinuxTag | 56) + FnGetsockopt = BuiltinFunc(platform.LinuxTag | 57) + FnOverrideReturn = BuiltinFunc(platform.LinuxTag | 58) + FnSockOpsCbFlagsSet = BuiltinFunc(platform.LinuxTag | 59) + FnMsgRedirectMap = BuiltinFunc(platform.LinuxTag | 60) + FnMsgApplyBytes = BuiltinFunc(platform.LinuxTag | 61) + FnMsgCorkBytes = BuiltinFunc(platform.LinuxTag | 62) 
+ FnMsgPullData = BuiltinFunc(platform.LinuxTag | 63) + FnBind = BuiltinFunc(platform.LinuxTag | 64) + FnXdpAdjustTail = BuiltinFunc(platform.LinuxTag | 65) + FnSkbGetXfrmState = BuiltinFunc(platform.LinuxTag | 66) + FnGetStack = BuiltinFunc(platform.LinuxTag | 67) + FnSkbLoadBytesRelative = BuiltinFunc(platform.LinuxTag | 68) + FnFibLookup = BuiltinFunc(platform.LinuxTag | 69) + FnSockHashUpdate = BuiltinFunc(platform.LinuxTag | 70) + FnMsgRedirectHash = BuiltinFunc(platform.LinuxTag | 71) + FnSkRedirectHash = BuiltinFunc(platform.LinuxTag | 72) + FnLwtPushEncap = BuiltinFunc(platform.LinuxTag | 73) + FnLwtSeg6StoreBytes = BuiltinFunc(platform.LinuxTag | 74) + FnLwtSeg6AdjustSrh = BuiltinFunc(platform.LinuxTag | 75) + FnLwtSeg6Action = BuiltinFunc(platform.LinuxTag | 76) + FnRcRepeat = BuiltinFunc(platform.LinuxTag | 77) + FnRcKeydown = BuiltinFunc(platform.LinuxTag | 78) + FnSkbCgroupId = BuiltinFunc(platform.LinuxTag | 79) + FnGetCurrentCgroupId = BuiltinFunc(platform.LinuxTag | 80) + FnGetLocalStorage = BuiltinFunc(platform.LinuxTag | 81) + FnSkSelectReuseport = BuiltinFunc(platform.LinuxTag | 82) + FnSkbAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 83) + FnSkLookupTcp = BuiltinFunc(platform.LinuxTag | 84) + FnSkLookupUdp = BuiltinFunc(platform.LinuxTag | 85) + FnSkRelease = BuiltinFunc(platform.LinuxTag | 86) + FnMapPushElem = BuiltinFunc(platform.LinuxTag | 87) + FnMapPopElem = BuiltinFunc(platform.LinuxTag | 88) + FnMapPeekElem = BuiltinFunc(platform.LinuxTag | 89) + FnMsgPushData = BuiltinFunc(platform.LinuxTag | 90) + FnMsgPopData = BuiltinFunc(platform.LinuxTag | 91) + FnRcPointerRel = BuiltinFunc(platform.LinuxTag | 92) + FnSpinLock = BuiltinFunc(platform.LinuxTag | 93) + FnSpinUnlock = BuiltinFunc(platform.LinuxTag | 94) + FnSkFullsock = BuiltinFunc(platform.LinuxTag | 95) + FnTcpSock = BuiltinFunc(platform.LinuxTag | 96) + FnSkbEcnSetCe = BuiltinFunc(platform.LinuxTag | 97) + FnGetListenerSock = BuiltinFunc(platform.LinuxTag | 98) + FnSkcLookupTcp = BuiltinFunc(platform.LinuxTag | 99) + FnTcpCheckSyncookie = BuiltinFunc(platform.LinuxTag | 100) + FnSysctlGetName = BuiltinFunc(platform.LinuxTag | 101) + FnSysctlGetCurrentValue = BuiltinFunc(platform.LinuxTag | 102) + FnSysctlGetNewValue = BuiltinFunc(platform.LinuxTag | 103) + FnSysctlSetNewValue = BuiltinFunc(platform.LinuxTag | 104) + FnStrtol = BuiltinFunc(platform.LinuxTag | 105) + FnStrtoul = BuiltinFunc(platform.LinuxTag | 106) + FnSkStorageGet = BuiltinFunc(platform.LinuxTag | 107) + FnSkStorageDelete = BuiltinFunc(platform.LinuxTag | 108) + FnSendSignal = BuiltinFunc(platform.LinuxTag | 109) + FnTcpGenSyncookie = BuiltinFunc(platform.LinuxTag | 110) + FnSkbOutput = BuiltinFunc(platform.LinuxTag | 111) + FnProbeReadUser = BuiltinFunc(platform.LinuxTag | 112) + FnProbeReadKernel = BuiltinFunc(platform.LinuxTag | 113) + FnProbeReadUserStr = BuiltinFunc(platform.LinuxTag | 114) + FnProbeReadKernelStr = BuiltinFunc(platform.LinuxTag | 115) + FnTcpSendAck = BuiltinFunc(platform.LinuxTag | 116) + FnSendSignalThread = BuiltinFunc(platform.LinuxTag | 117) + FnJiffies64 = BuiltinFunc(platform.LinuxTag | 118) + FnReadBranchRecords = BuiltinFunc(platform.LinuxTag | 119) + FnGetNsCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 120) + FnXdpOutput = BuiltinFunc(platform.LinuxTag | 121) + FnGetNetnsCookie = BuiltinFunc(platform.LinuxTag | 122) + FnGetCurrentAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 123) + FnSkAssign = BuiltinFunc(platform.LinuxTag | 124) + FnKtimeGetBootNs = BuiltinFunc(platform.LinuxTag | 125) + 
FnSeqPrintf = BuiltinFunc(platform.LinuxTag | 126) + FnSeqWrite = BuiltinFunc(platform.LinuxTag | 127) + FnSkCgroupId = BuiltinFunc(platform.LinuxTag | 128) + FnSkAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 129) + FnRingbufOutput = BuiltinFunc(platform.LinuxTag | 130) + FnRingbufReserve = BuiltinFunc(platform.LinuxTag | 131) + FnRingbufSubmit = BuiltinFunc(platform.LinuxTag | 132) + FnRingbufDiscard = BuiltinFunc(platform.LinuxTag | 133) + FnRingbufQuery = BuiltinFunc(platform.LinuxTag | 134) + FnCsumLevel = BuiltinFunc(platform.LinuxTag | 135) + FnSkcToTcp6Sock = BuiltinFunc(platform.LinuxTag | 136) + FnSkcToTcpSock = BuiltinFunc(platform.LinuxTag | 137) + FnSkcToTcpTimewaitSock = BuiltinFunc(platform.LinuxTag | 138) + FnSkcToTcpRequestSock = BuiltinFunc(platform.LinuxTag | 139) + FnSkcToUdp6Sock = BuiltinFunc(platform.LinuxTag | 140) + FnGetTaskStack = BuiltinFunc(platform.LinuxTag | 141) + FnLoadHdrOpt = BuiltinFunc(platform.LinuxTag | 142) + FnStoreHdrOpt = BuiltinFunc(platform.LinuxTag | 143) + FnReserveHdrOpt = BuiltinFunc(platform.LinuxTag | 144) + FnInodeStorageGet = BuiltinFunc(platform.LinuxTag | 145) + FnInodeStorageDelete = BuiltinFunc(platform.LinuxTag | 146) + FnDPath = BuiltinFunc(platform.LinuxTag | 147) + FnCopyFromUser = BuiltinFunc(platform.LinuxTag | 148) + FnSnprintfBtf = BuiltinFunc(platform.LinuxTag | 149) + FnSeqPrintfBtf = BuiltinFunc(platform.LinuxTag | 150) + FnSkbCgroupClassid = BuiltinFunc(platform.LinuxTag | 151) + FnRedirectNeigh = BuiltinFunc(platform.LinuxTag | 152) + FnPerCpuPtr = BuiltinFunc(platform.LinuxTag | 153) + FnThisCpuPtr = BuiltinFunc(platform.LinuxTag | 154) + FnRedirectPeer = BuiltinFunc(platform.LinuxTag | 155) + FnTaskStorageGet = BuiltinFunc(platform.LinuxTag | 156) + FnTaskStorageDelete = BuiltinFunc(platform.LinuxTag | 157) + FnGetCurrentTaskBtf = BuiltinFunc(platform.LinuxTag | 158) + FnBprmOptsSet = BuiltinFunc(platform.LinuxTag | 159) + FnKtimeGetCoarseNs = BuiltinFunc(platform.LinuxTag | 160) + FnImaInodeHash = BuiltinFunc(platform.LinuxTag | 161) + FnSockFromFile = BuiltinFunc(platform.LinuxTag | 162) + FnCheckMtu = BuiltinFunc(platform.LinuxTag | 163) + FnForEachMapElem = BuiltinFunc(platform.LinuxTag | 164) + FnSnprintf = BuiltinFunc(platform.LinuxTag | 165) + FnSysBpf = BuiltinFunc(platform.LinuxTag | 166) + FnBtfFindByNameKind = BuiltinFunc(platform.LinuxTag | 167) + FnSysClose = BuiltinFunc(platform.LinuxTag | 168) + FnTimerInit = BuiltinFunc(platform.LinuxTag | 169) + FnTimerSetCallback = BuiltinFunc(platform.LinuxTag | 170) + FnTimerStart = BuiltinFunc(platform.LinuxTag | 171) + FnTimerCancel = BuiltinFunc(platform.LinuxTag | 172) + FnGetFuncIp = BuiltinFunc(platform.LinuxTag | 173) + FnGetAttachCookie = BuiltinFunc(platform.LinuxTag | 174) + FnTaskPtRegs = BuiltinFunc(platform.LinuxTag | 175) + FnGetBranchSnapshot = BuiltinFunc(platform.LinuxTag | 176) + FnTraceVprintk = BuiltinFunc(platform.LinuxTag | 177) + FnSkcToUnixSock = BuiltinFunc(platform.LinuxTag | 178) + FnKallsymsLookupName = BuiltinFunc(platform.LinuxTag | 179) + FnFindVma = BuiltinFunc(platform.LinuxTag | 180) + FnLoop = BuiltinFunc(platform.LinuxTag | 181) + FnStrncmp = BuiltinFunc(platform.LinuxTag | 182) + FnGetFuncArg = BuiltinFunc(platform.LinuxTag | 183) + FnGetFuncRet = BuiltinFunc(platform.LinuxTag | 184) + FnGetFuncArgCnt = BuiltinFunc(platform.LinuxTag | 185) + FnGetRetval = BuiltinFunc(platform.LinuxTag | 186) + FnSetRetval = BuiltinFunc(platform.LinuxTag | 187) + FnXdpGetBuffLen = BuiltinFunc(platform.LinuxTag | 188) + FnXdpLoadBytes = 
BuiltinFunc(platform.LinuxTag | 189) + FnXdpStoreBytes = BuiltinFunc(platform.LinuxTag | 190) + FnCopyFromUserTask = BuiltinFunc(platform.LinuxTag | 191) + FnSkbSetTstamp = BuiltinFunc(platform.LinuxTag | 192) + FnImaFileHash = BuiltinFunc(platform.LinuxTag | 193) + FnKptrXchg = BuiltinFunc(platform.LinuxTag | 194) + FnMapLookupPercpuElem = BuiltinFunc(platform.LinuxTag | 195) + FnSkcToMptcpSock = BuiltinFunc(platform.LinuxTag | 196) + FnDynptrFromMem = BuiltinFunc(platform.LinuxTag | 197) + FnRingbufReserveDynptr = BuiltinFunc(platform.LinuxTag | 198) + FnRingbufSubmitDynptr = BuiltinFunc(platform.LinuxTag | 199) + FnRingbufDiscardDynptr = BuiltinFunc(platform.LinuxTag | 200) + FnDynptrRead = BuiltinFunc(platform.LinuxTag | 201) + FnDynptrWrite = BuiltinFunc(platform.LinuxTag | 202) + FnDynptrData = BuiltinFunc(platform.LinuxTag | 203) + FnTcpRawGenSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 204) + FnTcpRawGenSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 205) + FnTcpRawCheckSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 206) + FnTcpRawCheckSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 207) + FnKtimeGetTaiNs = BuiltinFunc(platform.LinuxTag | 208) + FnUserRingbufDrain = BuiltinFunc(platform.LinuxTag | 209) + FnCgrpStorageGet = BuiltinFunc(platform.LinuxTag | 210) + FnCgrpStorageDelete = BuiltinFunc(platform.LinuxTag | 211) +) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/func_string.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/func_string.go index 47150bc4f2d2..d5d624f09483 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/asm/func_string.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -220,16 +220,57 @@ func _() { _ = x[FnUserRingbufDrain-209] _ = x[FnCgrpStorageGet-210] _ = x[FnCgrpStorageDelete-211] - _ = x[maxBuiltinFunc-212] + _ = x[WindowsFnMapLookupElem-268435457] + _ = x[WindowsFnMapUpdateElem-268435458] + _ = x[WindowsFnMapDeleteElem-268435459] + _ = x[WindowsFnMapLookupAndDeleteElem-268435460] + _ = x[WindowsFnTailCall-268435461] + _ = x[WindowsFnGetPrandomU32-268435462] + _ = x[WindowsFnKtimeGetBootNs-268435463] + _ = x[WindowsFnGetSmpProcessorId-268435464] + _ = x[WindowsFnKtimeGetNs-268435465] + _ = x[WindowsFnCsumDiff-268435466] + _ = x[WindowsFnRingbufOutput-268435467] + _ = x[WindowsFnTracePrintk2-268435468] + _ = x[WindowsFnTracePrintk3-268435469] + _ = x[WindowsFnTracePrintk4-268435470] + _ = x[WindowsFnTracePrintk5-268435471] + _ = x[WindowsFnMapPushElem-268435472] + _ = x[WindowsFnMapPopElem-268435473] + _ = x[WindowsFnMapPeekElem-268435474] + _ = x[WindowsFnGetCurrentPidTgid-268435475] + _ = x[WindowsFnGetCurrentLogonId-268435476] + _ = x[WindowsFnIsCurrentAdmin-268435477] + _ = x[WindowsFnMemcpy-268435478] + _ = x[WindowsFnMemcmp-268435479] + _ = x[WindowsFnMemset-268435480] + _ = x[WindowsFnMemmove-268435481] + _ = x[WindowsFnGetSocketCookie-268435482] + _ = x[WindowsFnStrncpyS-268435483] + _ = x[WindowsFnStrncatS-268435484] + _ = x[WindowsFnStrnlenS-268435485] + _ = x[WindowsFnKtimeGetBootMs-268435486] + _ = x[WindowsFnKtimeGetMs-268435487] } -const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc" +const ( + _BuiltinFunc_name_0 = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDelete" + _BuiltinFunc_name_1 = 
"WindowsFnMapLookupElemWindowsFnMapUpdateElemWindowsFnMapDeleteElemWindowsFnMapLookupAndDeleteElemWindowsFnTailCallWindowsFnGetPrandomU32WindowsFnKtimeGetBootNsWindowsFnGetSmpProcessorIdWindowsFnKtimeGetNsWindowsFnCsumDiffWindowsFnRingbufOutputWindowsFnTracePrintk2WindowsFnTracePrintk3WindowsFnTracePrintk4WindowsFnTracePrintk5WindowsFnMapPushElemWindowsFnMapPopElemWindowsFnMapPeekElemWindowsFnGetCurrentPidTgidWindowsFnGetCurrentLogonIdWindowsFnIsCurrentAdminWindowsFnMemcpyWindowsFnMemcmpWindowsFnMemsetWindowsFnMemmoveWindowsFnGetSocketCookieWindowsFnStrncpySWindowsFnStrncatSWindowsFnStrnlenSWindowsFnKtimeGetBootMsWindowsFnKtimeGetMs" +) -var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179} +var ( + _BuiltinFunc_index_0 = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165} + _BuiltinFunc_index_1 = [...]uint16{0, 22, 44, 66, 97, 114, 136, 159, 185, 204, 221, 243, 264, 285, 306, 327, 347, 366, 386, 412, 438, 461, 476, 491, 506, 522, 546, 563, 580, 597, 620, 639} +) func (i BuiltinFunc) String() string { - if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { + switch { + case i <= 211: + return _BuiltinFunc_name_0[_BuiltinFunc_index_0[i]:_BuiltinFunc_index_0[i+1]] + case 268435457 <= 
i && i <= 268435487: + i -= 268435457 + return _BuiltinFunc_name_1[_BuiltinFunc_index_1[i]:_BuiltinFunc_index_1[i+1]] + default: return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" } - return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/func_win.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/func_win.go new file mode 100644 index 000000000000..b016f008630b --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/func_win.go @@ -0,0 +1,44 @@ +// Code generated by internal/cmd/genwinfunctions.awk; DO NOT EDIT. + +package asm + +// Code in this file is derived from eBPF for Windows, available under the MIT License. + +import ( + "github.com/cilium/ebpf/internal/platform" +) + +// Built-in functions (Windows). +const ( + WindowsFnMapLookupElem = BuiltinFunc(platform.WindowsTag | 1) + WindowsFnMapUpdateElem = BuiltinFunc(platform.WindowsTag | 2) + WindowsFnMapDeleteElem = BuiltinFunc(platform.WindowsTag | 3) + WindowsFnMapLookupAndDeleteElem = BuiltinFunc(platform.WindowsTag | 4) + WindowsFnTailCall = BuiltinFunc(platform.WindowsTag | 5) + WindowsFnGetPrandomU32 = BuiltinFunc(platform.WindowsTag | 6) + WindowsFnKtimeGetBootNs = BuiltinFunc(platform.WindowsTag | 7) + WindowsFnGetSmpProcessorId = BuiltinFunc(platform.WindowsTag | 8) + WindowsFnKtimeGetNs = BuiltinFunc(platform.WindowsTag | 9) + WindowsFnCsumDiff = BuiltinFunc(platform.WindowsTag | 10) + WindowsFnRingbufOutput = BuiltinFunc(platform.WindowsTag | 11) + WindowsFnTracePrintk2 = BuiltinFunc(platform.WindowsTag | 12) + WindowsFnTracePrintk3 = BuiltinFunc(platform.WindowsTag | 13) + WindowsFnTracePrintk4 = BuiltinFunc(platform.WindowsTag | 14) + WindowsFnTracePrintk5 = BuiltinFunc(platform.WindowsTag | 15) + WindowsFnMapPushElem = BuiltinFunc(platform.WindowsTag | 16) + WindowsFnMapPopElem = BuiltinFunc(platform.WindowsTag | 17) + WindowsFnMapPeekElem = BuiltinFunc(platform.WindowsTag | 18) + WindowsFnGetCurrentPidTgid = BuiltinFunc(platform.WindowsTag | 19) + WindowsFnGetCurrentLogonId = BuiltinFunc(platform.WindowsTag | 20) + WindowsFnIsCurrentAdmin = BuiltinFunc(platform.WindowsTag | 21) + WindowsFnMemcpy = BuiltinFunc(platform.WindowsTag | 22) + WindowsFnMemcmp = BuiltinFunc(platform.WindowsTag | 23) + WindowsFnMemset = BuiltinFunc(platform.WindowsTag | 24) + WindowsFnMemmove = BuiltinFunc(platform.WindowsTag | 25) + WindowsFnGetSocketCookie = BuiltinFunc(platform.WindowsTag | 26) + WindowsFnStrncpyS = BuiltinFunc(platform.WindowsTag | 27) + WindowsFnStrncatS = BuiltinFunc(platform.WindowsTag | 28) + WindowsFnStrnlenS = BuiltinFunc(platform.WindowsTag | 29) + WindowsFnKtimeGetBootMs = BuiltinFunc(platform.WindowsTag | 30) + WindowsFnKtimeGetMs = BuiltinFunc(platform.WindowsTag | 31) +) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/instruction.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/instruction.go index 67cd39d6f672..b2ce72ca8366 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -11,8 +11,9 @@ import ( "sort" "strings" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" - "github.com/cilium/ebpf/internal/unix" ) // InstructionSize is the size of a BPF instruction in bytes 
@@ -43,10 +44,10 @@ type Instruction struct { } // Unmarshal decodes a BPF instruction. -func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { +func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder, platform string) error { data := make([]byte, InstructionSize) if _, err := io.ReadFull(r, data); err != nil { - return 0, err + return err } ins.OpCode = OpCode(data[0]) @@ -61,7 +62,23 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err ins.Offset = int16(bo.Uint16(data[2:4])) - if ins.OpCode.Class().IsALU() { + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. + ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if ins.IsBuiltinCall() { + if ins.Constant >= 0 { + // Leave negative constants from the instruction stream + // unchanged. These are sometimes used as placeholders for later + // patching. + // This relies on not having a valid platform tag with a high bit set. + fn, err := BuiltinFuncForPlatform(platform, uint32(ins.Constant)) + if err != nil { + return err + } + ins.Constant = int64(fn) + } + } else if ins.OpCode.Class().IsALU() { switch ins.OpCode.ALUOp() { case Div: if ins.Offset == 1 { @@ -86,33 +103,35 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err ins.Offset = 0 } } + } else if ins.OpCode.Class() == StXClass && + ins.OpCode.Mode() == AtomicMode { + // For atomic ops, part of the opcode is stored in the + // constant field. Shift over 8 bytes so we can OR with the actual opcode and + // apply `atomicMask` to avoid merging unknown bits that may be added in the future. + ins.OpCode |= (OpCode((ins.Constant << 8)) & atomicMask) } - // Convert to int32 before widening to int64 - // to ensure the signed bit is carried over. - ins.Constant = int64(int32(bo.Uint32(data[4:8]))) - if !ins.OpCode.IsDWordLoad() { - return InstructionSize, nil + return nil } // Pull another instruction from the stream to retrieve the second // half of the 64-bit immediate value. if _, err := io.ReadFull(r, data); err != nil { // No Wrap, to avoid io.EOF clash - return 0, errors.New("64bit immediate is missing second half") + return errors.New("64bit immediate is missing second half") } // Require that all fields other than the value are zero. if bo.Uint32(data[0:4]) != 0 { - return 0, errors.New("64bit immediate has non-zero fields") + return errors.New("64bit immediate has non-zero fields") } cons1 := uint32(ins.Constant) cons2 := int32(bo.Uint32(data[4:8])) ins.Constant = int64(cons2)<<32 | int64(cons1) - return 2 * InstructionSize, nil + return nil } // Marshal encodes a BPF instruction. 
@@ -134,7 +153,14 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 0, fmt.Errorf("can't marshal registers: %s", err) } - if ins.OpCode.Class().IsALU() { + if ins.IsBuiltinCall() { + fn := BuiltinFunc(ins.Constant) + plat, value := platform.DecodeConstant(fn) + if plat != platform.Native { + return 0, fmt.Errorf("function %s (%s): %w", fn, plat, internal.ErrNotSupportedOnOS) + } + cons = int32(value) + } else if ins.OpCode.Class().IsALU() { newOffset := int16(0) switch ins.OpCode.ALUOp() { case SDiv: @@ -157,6 +183,9 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins) } ins.Offset = newOffset + } else if atomic := ins.OpCode.AtomicOp(); atomic != InvalidAtomic { + ins.OpCode = ins.OpCode &^ atomicMask + ins.Constant = int64(atomic >> 8) } op, err := ins.OpCode.bpfOpCode() @@ -368,8 +397,8 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) case MemMode, MemSXMode: fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) - case XAddMode: - fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) + case AtomicMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d", ins.Dst, ins.Src, ins.Offset) } case cls.IsALU(): @@ -530,29 +559,24 @@ type FDer interface { // Instructions is an eBPF program. type Instructions []Instruction -// Unmarshal unmarshals an Instructions from a binary instruction stream. -// All instructions in insns are replaced by instructions decoded from r. -func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { - if len(*insns) > 0 { - *insns = nil - } - +// AppendInstructions decodes [Instruction] from r and appends them to insns. +func AppendInstructions(insns Instructions, r io.Reader, bo binary.ByteOrder, platform string) (Instructions, error) { var offset uint64 for { var ins Instruction - n, err := ins.Unmarshal(r, bo) + err := ins.Unmarshal(r, bo, platform) if errors.Is(err, io.EOF) { break } if err != nil { - return fmt.Errorf("offset %d: %w", offset, err) + return nil, fmt.Errorf("offset %d: %w", offset, err) } - *insns = append(*insns, ins) - offset += n + insns = append(insns, ins) + offset += ins.Size() } - return nil + return insns, nil } // Name returns the name of the function insns belongs to, if any. @@ -804,7 +828,7 @@ func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { return "", fmt.Errorf("instruction %d: %w", i, err) } } - return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil + return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil } // encodeFunctionReferences populates the Offset (or Constant, depending on diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store.go index cdb5c5cfa43b..29571a74ec6c 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -1,5 +1,7 @@ package asm +import "fmt" + //go:generate go run golang.org/x/tools/cmd/stringer@latest -output load_store_string.go -type=Mode,Size // Mode for load and store operations @@ -26,10 +28,119 @@ const ( MemMode Mode = 0x60 // MemSXMode - load from memory, sign extension MemSXMode Mode = 0x80 - // XAddMode - add atomically across processors. 
- XAddMode Mode = 0xc0 + // AtomicMode - add atomically across processors. + AtomicMode Mode = 0xc0 +) + +const atomicMask OpCode = 0x0001_ff00 + +type AtomicOp uint32 + +const ( + InvalidAtomic AtomicOp = 0xffff_ffff + + // AddAtomic - add src to memory address dst atomically + AddAtomic AtomicOp = AtomicOp(Add) << 8 + // FetchAdd - add src to memory address dst atomically, store result in src + FetchAdd AtomicOp = AddAtomic | fetch + // AndAtomic - bitwise AND src with memory address at dst atomically + AndAtomic AtomicOp = AtomicOp(And) << 8 + // FetchAnd - bitwise AND src with memory address at dst atomically, store result in src + FetchAnd AtomicOp = AndAtomic | fetch + // OrAtomic - bitwise OR src with memory address at dst atomically + OrAtomic AtomicOp = AtomicOp(Or) << 8 + // FetchOr - bitwise OR src with memory address at dst atomically, store result in src + FetchOr AtomicOp = OrAtomic | fetch + // XorAtomic - bitwise XOR src with memory address at dst atomically + XorAtomic AtomicOp = AtomicOp(Xor) << 8 + // FetchXor - bitwise XOR src with memory address at dst atomically, store result in src + FetchXor AtomicOp = XorAtomic | fetch + + // Xchg - atomically exchange the old value with the new value + // + // src gets populated with the old value of *(size *)(dst + offset). + Xchg AtomicOp = 0x0000_e000 | fetch + // CmpXchg - atomically compare and exchange the old value with the new value + // + // Compares R0 and *(size *)(dst + offset), writes src to *(size *)(dst + offset) on match. + // R0 gets populated with the old value of *(size *)(dst + offset), even if no exchange occurs. + CmpXchg AtomicOp = 0x0000_f000 | fetch + + // fetch modifier for copy-modify-write atomics + fetch AtomicOp = 0x0000_0100 + // loadAcquire - atomically load with acquire semantics + loadAcquire AtomicOp = 0x0001_0000 + // storeRelease - atomically store with release semantics + storeRelease AtomicOp = 0x0001_1000 ) +func (op AtomicOp) String() string { + var name string + switch op { + case AddAtomic, AndAtomic, OrAtomic, XorAtomic: + name = ALUOp(op >> 8).String() + case FetchAdd, FetchAnd, FetchOr, FetchXor: + name = "Fetch" + ALUOp((op^fetch)>>8).String() + case Xchg: + name = "Xchg" + case CmpXchg: + name = "CmpXchg" + case loadAcquire: + name = "LdAcq" + case storeRelease: + name = "StRel" + default: + name = fmt.Sprintf("AtomicOp(%#x)", uint32(op)) + } + + return name +} + +func (op AtomicOp) OpCode(size Size) OpCode { + switch op { + case AddAtomic, AndAtomic, OrAtomic, XorAtomic, + FetchAdd, FetchAnd, FetchOr, FetchXor, + Xchg, CmpXchg: + switch size { + case Byte, Half: + // 8-bit and 16-bit atomic copy-modify-write atomics are not supported + return InvalidOpCode + } + } + + return OpCode(StXClass).SetMode(AtomicMode).SetSize(size).SetAtomicOp(op) +} + +// Mem emits `*(size *)(dst + offset) (op) src`. +func (op AtomicOp) Mem(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: op.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// Emits `lock-acquire dst = *(size *)(src + offset)`. +func LoadAcquire(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: loadAcquire.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// Emits `lock-release *(size *)(dst + offset) = src`. 
+func StoreRelease(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: storeRelease.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + // Size of load and store operations // // msb lsb @@ -202,6 +313,10 @@ func StoreImmOp(size Size) OpCode { // StoreImm emits `*(size *)(dst + offset) = value`. func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + return Instruction{ OpCode: StoreImmOp(size), Dst: dst, @@ -212,14 +327,10 @@ func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { // StoreXAddOp returns the OpCode to atomically add a register to a value in memory. func StoreXAddOp(size Size) OpCode { - return OpCode(StXClass).SetMode(XAddMode).SetSize(size) + return AddAtomic.OpCode(size) } // StoreXAdd atomically adds src to *dst. func StoreXAdd(dst, src Register, size Size) Instruction { - return Instruction{ - OpCode: StoreXAddOp(size), - Dst: dst, - Src: src, - } + return AddAtomic.Mem(dst, src, size, 0) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store_string.go index c48080327c0b..bbed58b66fac 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store_string.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -14,7 +14,7 @@ func _() { _ = x[IndMode-64] _ = x[MemMode-96] _ = x[MemSXMode-128] - _ = x[XAddMode-192] + _ = x[AtomicMode-192] } const ( @@ -23,7 +23,7 @@ const ( _Mode_name_2 = "IndMode" _Mode_name_3 = "MemMode" _Mode_name_4 = "MemSXMode" - _Mode_name_5 = "XAddMode" + _Mode_name_5 = "AtomicMode" _Mode_name_6 = "InvalidMode" ) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/asm/opcode.go b/src/runtime/vendor/github.com/cilium/ebpf/asm/opcode.go index 1dfd0b171a4e..c82a1f8fb0b6 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/asm/opcode.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -71,24 +71,29 @@ func (cls Class) isJumpOrALU() bool { // // The encoding varies based on a 3-bit Class: // -// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 -// ??? | CLS +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// ??? | CLS // // For ALUClass and ALUCLass32: // -// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 -// OPC |S| CLS +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS // // For LdClass, LdXclass, StClass and StXClass: // -// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 -// 0 | MDE |SIZ| CLS +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | MDE |SIZ| CLS +// +// For StXClass where MDE == AtomicMode: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | ATOMIC OP | MDE |SIZ| CLS // // For JumpClass, Jump32Class: // -// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 -// 0 | OPC |S| CLS -type OpCode uint16 +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +type OpCode uint32 // InvalidOpCode is returned by setters on OpCode const InvalidOpCode OpCode = 0xffff @@ -138,6 +143,14 @@ func (op OpCode) Size() Size { return Size(op & sizeMask) } +// AtomicOp returns the type of atomic operation. 
+func (op OpCode) AtomicOp() AtomicOp { + if op.Class() != StXClass || op.Mode() != AtomicMode { + return InvalidAtomic + } + return AtomicOp(op & atomicMask) +} + // Source returns the source for branch and ALU operations. func (op OpCode) Source() Source { if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { @@ -199,6 +212,13 @@ func (op OpCode) SetSize(size Size) OpCode { return (op & ^sizeMask) | OpCode(size) } +func (op OpCode) SetAtomicOp(atomic AtomicOp) OpCode { + if op.Class() != StXClass || op.Mode() != AtomicMode || !valid(OpCode(atomic), atomicMask) { + return InvalidOpCode + } + return (op & ^atomicMask) | OpCode(atomic) +} + // SetSource sets the source on jump and ALU operations. // // Returns InvalidOpCode if op is of the wrong class. @@ -247,6 +267,10 @@ func (op OpCode) String() string { mode := op.Mode() f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) + if atomic := op.AtomicOp(); atomic != InvalidAtomic { + f.WriteString(strings.TrimSuffix(atomic.String(), "Atomic")) + } + switch op.Size() { case DWord: f.WriteString("DW") diff --git a/src/runtime/vendor/github.com/cilium/ebpf/attachtype_string.go b/src/runtime/vendor/github.com/cilium/ebpf/attachtype_string.go index bece896bb611..efed516b621a 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/attachtype_string.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -52,6 +52,7 @@ func _() { _ = x[AttachSkReuseportSelectOrMigrate-40] _ = x[AttachPerfEvent-41] _ = x[AttachTraceKprobeMulti-42] + _ = x[AttachTraceKprobeSession-56] _ = x[AttachLSMCgroup-43] _ = x[AttachStructOps-44] _ = x[AttachNetfilter-45] @@ -65,15 +66,35 @@ func _() { _ = x[AttachCgroupUnixGetsockname-53] _ = x[AttachNetkitPrimary-54] _ = x[AttachNetkitPeer-55] + _ = x[AttachWindowsXDP-268435457] + _ = x[AttachWindowsBind-268435458] + _ = x[AttachWindowsCGroupInet4Connect-268435459] + _ = x[AttachWindowsCGroupInet6Connect-268435460] + _ = x[AttachWindowsCgroupInet4RecvAccept-268435461] + _ = x[AttachWindowsCgroupInet6RecvAccept-268435462] + _ = x[AttachWindowsCGroupSockOps-268435463] + _ = x[AttachWindowsSample-268435464] + _ = x[AttachWindowsXDPTest-268435465] } -const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeer" +const ( + _AttachType_name_0 = 
"NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeerTraceKprobeSession" + _AttachType_name_1 = "WindowsXDPWindowsBindWindowsCGroupInet4ConnectWindowsCGroupInet6ConnectWindowsCgroupInet4RecvAcceptWindowsCgroupInet6RecvAcceptWindowsCGroupSockOpsWindowsSampleWindowsXDPTest" +) -var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804} +var ( + _AttachType_index_0 = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804, 822} + _AttachType_index_1 = [...]uint8{0, 10, 21, 46, 71, 99, 127, 147, 160, 174} +) func (i AttachType) String() string { - if i >= AttachType(len(_AttachType_index)-1) { + switch { + case i <= 56: + return _AttachType_name_0[_AttachType_index_0[i]:_AttachType_index_0[i+1]] + case 268435457 <= i && i <= 268435465: + i -= 268435457 + return _AttachType_name_1[_AttachType_index_1[i]:_AttachType_index_1[i+1]] + default: return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/btf.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/btf.go index 671f680b2af2..d26931a04974 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/btf.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/btf.go @@ -7,10 +7,12 @@ import ( "errors" "fmt" "io" + "iter" + "maps" "math" "os" "reflect" - "sync" + "slices" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/sys" @@ -29,154 +31,24 @@ var ( // ID represents the unique ID of a BTF object. type ID = sys.BTFID -// immutableTypes is a set of types which musn't be changed. -type immutableTypes struct { - // All types contained by the spec, not including types from the base in - // case the spec was parsed from split BTF. - types []Type - - // Type IDs indexed by type. - typeIDs map[Type]TypeID - - // The ID of the first type in types. - firstTypeID TypeID - - // Types indexed by essential name. - // Includes all struct flavors and types with the same name. - namedTypes map[essentialName][]TypeID - - // Byte order of the types. This affects things like struct member order - // when using bitfields. 
- byteOrder binary.ByteOrder +type elfData struct { + sectionSizes map[string]uint32 + symbolOffsets map[elfSymbol]uint32 + fixups map[Type]bool } -func (s *immutableTypes) typeByID(id TypeID) (Type, bool) { - if id < s.firstTypeID { - return nil, false - } - - index := int(id - s.firstTypeID) - if index >= len(s.types) { - return nil, false - } - - return s.types[index], true -} - -// mutableTypes is a set of types which may be changed. -type mutableTypes struct { - imm immutableTypes - mu sync.RWMutex // protects copies below - copies map[Type]Type // map[orig]copy - copiedTypeIDs map[Type]TypeID // map[copy]origID -} - -// add a type to the set of mutable types. -// -// Copies type and all of its children once. Repeated calls with the same type -// do not copy again. -func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type { - mt.mu.RLock() - cpy, ok := mt.copies[typ] - mt.mu.RUnlock() - - if ok { - // Fast path: the type has been copied before. - return cpy - } - - // modifyGraphPreorder copies the type graph node by node, so we can't drop - // the lock in between. - mt.mu.Lock() - defer mt.mu.Unlock() - - return copyType(typ, typeIDs, mt.copies, mt.copiedTypeIDs) -} - -// copy a set of mutable types. -func (mt *mutableTypes) copy() *mutableTypes { - if mt == nil { - return nil - } - - mtCopy := &mutableTypes{ - mt.imm, - sync.RWMutex{}, - make(map[Type]Type, len(mt.copies)), - make(map[Type]TypeID, len(mt.copiedTypeIDs)), - } - - // Prevent concurrent modification of mt.copiedTypeIDs. - mt.mu.RLock() - defer mt.mu.RUnlock() - - copiesOfCopies := make(map[Type]Type, len(mt.copies)) - for orig, copy := range mt.copies { - // NB: We make a copy of copy, not orig, so that changes to mutable types - // are preserved. - copyOfCopy := copyType(copy, mt.copiedTypeIDs, copiesOfCopies, mtCopy.copiedTypeIDs) - mtCopy.copies[orig] = copyOfCopy - } - - return mtCopy -} - -func (mt *mutableTypes) typeID(typ Type) (TypeID, error) { - if _, ok := typ.(*Void); ok { - // Equality is weird for void, since it is a zero sized type. - return 0, nil - } - - mt.mu.RLock() - defer mt.mu.RUnlock() - - id, ok := mt.copiedTypeIDs[typ] - if !ok { - return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) - } - - return id, nil -} - -func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) { - immT, ok := mt.imm.typeByID(id) - if !ok { - return nil, false - } - - return mt.add(immT, mt.imm.typeIDs), true -} - -func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) { - immTypes := mt.imm.namedTypes[newEssentialName(name)] - if len(immTypes) == 0 { - return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) - } - - // Return a copy to prevent changes to namedTypes. - result := make([]Type, 0, len(immTypes)) - for _, id := range immTypes { - immT, ok := mt.imm.typeByID(id) - if !ok { - return nil, fmt.Errorf("no type with ID %d", id) - } - - // Match against the full name, not just the essential one - // in case the type being looked up is a struct flavor. - if immT.TypeName() == name { - result = append(result, mt.add(immT, mt.imm.typeIDs)) - } - } - return result, nil +type elfSymbol struct { + section string + name string } // Spec allows querying a set of Types and loading the set into the // kernel. type Spec struct { - *mutableTypes + *decoder - // String table from ELF. - strings *stringTable + // Additional data from ELF, may be nil. + elf *elfData } // LoadSpec opens file and calls LoadSpecFromReader on it. 
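The asm changes earlier in this diff replace the XAdd-only helpers with a general AtomicOp type: AddAtomic/FetchAdd/Xchg/CmpXchg select the operation, Mem builds the store instruction, and LoadAcquire/StoreRelease cover the new acquire/release forms. The following is a minimal sketch of how these compose, using only the exported identifiers introduced in those hunks plus the asm package's existing register and size constants (asm.R1, asm.R2, asm.R3, asm.Word, asm.DWord); it is an illustration, not part of the vendored code:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// *(u64 *)(R1 + 0) += R2, atomically. StoreXAdd is now a thin wrapper
	// around this form (AddAtomic.Mem with a zero offset).
	add := asm.AddAtomic.Mem(asm.R1, asm.R2, asm.DWord, 0)

	// Same operation, but R2 additionally receives the old value.
	fetchAdd := asm.FetchAdd.Mem(asm.R1, asm.R2, asm.DWord, 0)

	// Atomically swap R2 with *(u64 *)(R1 + 0).
	xchg := asm.Xchg.Mem(asm.R1, asm.R2, asm.DWord, 0)

	// Acquire/release forms added alongside the copy-modify-write ops.
	ld := asm.LoadAcquire(asm.R3, asm.R1, asm.Word, 0)
	st := asm.StoreRelease(asm.R1, asm.R3, asm.Word, 0)

	fmt.Println(add, fetchAdd, xchg, ld, st)
}

Note that the 8-bit and 16-bit sizes are rejected for the copy-modify-write operations (AtomicOp.OpCode returns InvalidOpCode for Byte and Half), matching the kernel's restrictions.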
@@ -237,13 +109,13 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) { // Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well // beyond this range. Since these symbols cannot be described by BTF info, // ignore them here. -func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) { +func symbolOffsets(file *internal.SafeELFFile) (map[elfSymbol]uint32, error) { symbols, err := file.Symbols() if err != nil { return nil, fmt.Errorf("can't read symbols: %v", err) } - offsets := make(map[symbol]uint32) + offsets := make(map[elfSymbol]uint32) for _, sym := range symbols { if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { // Ignore things like SHN_ABS @@ -260,7 +132,7 @@ func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) { } secName := file.Sections[sym.Section].Name - offsets[symbol{secName, sym.Name}] = uint32(sym.Value) + offsets[elfSymbol{secName, sym.Name}] = uint32(sym.Value) } return offsets, nil @@ -307,9 +179,10 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { return nil, err } - err = fixupDatasec(spec.imm.types, sectionSizes, offsets) - if err != nil { - return nil, err + spec.elf = &elfData{ + sectionSizes, + offsets, + make(map[Type]bool), } return spec, nil @@ -317,72 +190,40 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) { var ( + baseDecoder *decoder baseStrings *stringTable - firstTypeID TypeID err error ) if base != nil { - if base.imm.firstTypeID != 0 { - return nil, fmt.Errorf("can't use split BTF as base") - } - + baseDecoder = base.decoder baseStrings = base.strings - - firstTypeID, err = base.nextTypeID() - if err != nil { - return nil, err - } } - types, rawStrings, err := parseBTF(btf, bo, baseStrings, base) + buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) + header, err := parseBTFHeader(buf, bo) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing .BTF header: %v", err) } - typeIDs, typesByName := indexTypes(types, firstTypeID) - - return &Spec{ - &mutableTypes{ - immutableTypes{ - types, - typeIDs, - firstTypeID, - typesByName, - bo, - }, - sync.RWMutex{}, - make(map[Type]Type), - make(map[Type]TypeID), - }, - rawStrings, - }, nil -} - -func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) { - namedTypes := 0 - for _, typ := range types { - if typ.TypeName() != "" { - // Do a pre-pass to figure out how big types by name has to be. - // Most types have unique names, so it's OK to ignore essentialName - // here. 
- namedTypes++ - } + stringsSection := io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)) + rawStrings, err := readStringTable(stringsSection, baseStrings) + if err != nil { + return nil, fmt.Errorf("read string section: %w", err) } - typeIDs := make(map[Type]TypeID, len(types)) - typesByName := make(map[essentialName][]TypeID, namedTypes) - - for i, typ := range types { - id := firstTypeID + TypeID(i) - typeIDs[typ] = id + typesSection := io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)) + rawTypes := make([]byte, header.TypeLen) + if _, err := io.ReadFull(typesSection, rawTypes); err != nil { + return nil, fmt.Errorf("read type section: %w", err) + } - if name := newEssentialName(typ.TypeName()); name != "" { - typesByName[name] = append(typesByName[name], id) - } + decoder, err := newDecoder(rawTypes, bo, rawStrings, baseDecoder) + if err != nil { + return nil, err } - return typeIDs, typesByName + return &Spec{decoder, nil}, nil } func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { @@ -400,60 +241,41 @@ func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { return nil } -// parseBTF reads a .BTF section into memory and parses it into a list of -// raw types and a string table. -func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable, base *Spec) ([]Type, *stringTable, error) { - buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) - header, err := parseBTFHeader(buf, bo) - if err != nil { - return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) - } - - rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)), - baseStrings) - if err != nil { - return nil, nil, fmt.Errorf("can't read type names: %w", err) - } - - buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))) - types, err := readAndInflateTypes(buf, bo, header.TypeLen, rawStrings, base) - if err != nil { - return nil, nil, err - } - - return types, rawStrings, nil -} - -type symbol struct { - section string - name string -} - // fixupDatasec attempts to patch up missing info in Datasecs and its members by // supplementing them with information from the ELF headers and symbol table. -func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error { - for _, typ := range types { - ds, ok := typ.(*Datasec) - if !ok { - continue +func (elf *elfData) fixupDatasec(typ Type) error { + if elf == nil { + return nil + } + + if ds, ok := typ.(*Datasec); ok { + if elf.fixups[ds] { + return nil } + elf.fixups[ds] = true name := ds.Name // Some Datasecs are virtual and don't have corresponding ELF sections. switch name { case ".ksyms": - // .ksyms describes forward declarations of kfunc signatures. + // .ksyms describes forward declarations of kfunc signatures, as well as + // references to kernel symbols. // Nothing to fix up, all sizes and offsets are 0. for _, vsi := range ds.Vars { - _, ok := vsi.Type.(*Func) - if !ok { - // Only Funcs are supported in the .ksyms Datasec. 
- return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported) + switch t := vsi.Type.(type) { + case *Func: + continue + case *Var: + if _, ok := t.Type.(*Void); !ok { + return fmt.Errorf("data section %s: expected %s to be *Void, not %T: %w", name, vsi.Type.TypeName(), vsi.Type, ErrNotSupported) + } + default: + return fmt.Errorf("data section %s: expected to be either *btf.Func or *btf.Var, not %T: %w", name, vsi.Type, ErrNotSupported) } } - continue + return nil case ".kconfig": // .kconfig has a size of 0 and has all members' offsets set to 0. // Fix up all offsets and set the Datasec's size. @@ -466,21 +288,21 @@ func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symb vsi.Type.(*Var).Linkage = GlobalVar } - continue + return nil } if ds.Size != 0 { - continue + return nil } - ds.Size, ok = sectionSizes[name] + ds.Size, ok = elf.sectionSizes[name] if !ok { return fmt.Errorf("data section %s: missing size", name) } for i := range ds.Vars { symName := ds.Vars[i].Type.TypeName() - ds.Vars[i].Offset, ok = offsets[symbol{name, symName}] + ds.Vars[i].Offset, ok = elf.symbolOffsets[elfSymbol{name, symName}] if !ok { return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName) } @@ -524,36 +346,29 @@ func fixupDatasecLayout(ds *Datasec) error { return nil } -// Copy creates a copy of Spec. +// Copy a Spec. +// +// All contained types are duplicated while preserving any modifications made +// to them. func (s *Spec) Copy() *Spec { if s == nil { return nil } - return &Spec{ - s.mutableTypes.copy(), - s.strings, + cpy := &Spec{ + s.decoder.Copy(), + nil, } -} - -type sliceWriter []byte -func (sw sliceWriter) Write(p []byte) (int, error) { - if len(p) != len(sw) { - return 0, errors.New("size doesn't match") + if s.elf != nil { + cpy.elf = &elfData{ + s.elf.sectionSizes, + s.elf.symbolOffsets, + maps.Clone(s.elf.fixups), + } } - return copy(sw, p), nil -} - -// nextTypeID returns the next unallocated type ID or an error if there are no -// more type IDs. -func (s *Spec) nextTypeID() (TypeID, error) { - id := s.imm.firstTypeID + TypeID(len(s.imm.types)) - if id < s.imm.firstTypeID { - return 0, fmt.Errorf("no more type IDs") - } - return id, nil + return cpy } // TypeByID returns the BTF Type with the given type ID. @@ -561,9 +376,13 @@ func (s *Spec) nextTypeID() (TypeID, error) { // Returns an error wrapping ErrNotFound if a Type with the given ID // does not exist in the Spec. func (s *Spec) TypeByID(id TypeID) (Type, error) { - typ, ok := s.typeByID(id) - if !ok { - return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound) + typ, err := s.decoder.TypeByID(id) + if err != nil { + return nil, fmt.Errorf("inflate type: %w", err) + } + + if err := s.elf.fixupDatasec(typ); err != nil { + return nil, err } return typ, nil @@ -573,7 +392,7 @@ func (s *Spec) TypeByID(id TypeID) (Type, error) { // // Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec. func (s *Spec) TypeID(typ Type) (TypeID, error) { - return s.mutableTypes.typeID(typ) + return s.decoder.TypeID(typ) } // AnyTypesByName returns a list of BTF Types with the given name. @@ -584,7 +403,25 @@ func (s *Spec) TypeID(typ Type) (TypeID, error) { // // Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. 
func (s *Spec) AnyTypesByName(name string) ([]Type, error) { - return s.mutableTypes.anyTypesByName(name) + types, err := s.TypesByName(newEssentialName(name)) + if err != nil { + return nil, err + } + + for i := 0; i < len(types); i++ { + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. + if types[i].TypeName() != name { + types = slices.Delete(types, i, i+1) + continue + } + + if err := s.elf.fixupDatasec(types[i]); err != nil { + return nil, err + } + } + + return types, nil } // AnyTypeByName returns a Type with the given name. @@ -671,29 +508,28 @@ func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { return loadRawSpec(r, internal.NativeEndian, base) } -// TypesIterator iterates over types of a given spec. -type TypesIterator struct { - spec *Spec - id TypeID - done bool - // The last visited type in the spec. - Type Type -} +// All iterates over all types. +func (s *Spec) All() iter.Seq2[Type, error] { + return func(yield func(Type, error) bool) { + for id := s.firstTypeID; ; id++ { + typ, err := s.TypeByID(id) + if errors.Is(err, ErrNotFound) { + return + } else if err != nil { + yield(nil, err) + return + } -// Iterate returns the types iterator. -func (s *Spec) Iterate() *TypesIterator { - return &TypesIterator{spec: s, id: s.imm.firstTypeID} -} + // Skip declTags, during unmarshaling declTags become `Tags` fields of other types. + // We keep them in the spec to avoid holes in the ID space, but for the purposes of + // iteration, they are not useful to the user. + if _, ok := typ.(*declTag); ok { + continue + } -// Next returns true as long as there are any remaining types. -func (iter *TypesIterator) Next() bool { - if iter.done { - return false + if !yield(typ, nil) { + return + } + } } - - var ok bool - iter.Type, ok = iter.spec.typeByID(iter.id) - iter.id++ - iter.done = !ok - return !iter.done } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/btf_types.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/btf_types.go index f0e327abc0e6..d20a31969ded 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/btf_types.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -39,6 +39,7 @@ const ( kindFloat // Float // Added 5.16 kindDeclTag // DeclTag + // Added 5.17 kindTypeTag // TypeTag // Added 6.0 kindEnum64 // Enum64 @@ -129,8 +130,6 @@ func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { return &header, nil } -var btfTypeLen = binary.Size(btfType{}) - // btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. type btfType struct { NameOff uint32 @@ -267,30 +266,53 @@ func (bt *btfType) SetSize(size uint32) { bt.SizeType = size } -func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error { - buf := make([]byte, unsafe.Sizeof(*bt)) +func (bt *btfType) Encode(buf []byte, bo binary.ByteOrder) (int, error) { + if len(buf) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to marshal btfType") + } bo.PutUint32(buf[0:], bt.NameOff) bo.PutUint32(buf[4:], bt.Info) bo.PutUint32(buf[8:], bt.SizeType) - _, err := w.Write(buf) - return err -} - -type rawType struct { - btfType - data interface{} + return btfTypeSize, nil } -func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { - if err := rt.btfType.Marshal(w, bo); err != nil { - return err +// DataLen returns the length of additional type specific data in bytes. 
+func (bt *btfType) DataLen() (int, error) { + switch bt.Kind() { + case kindInt: + return int(unsafe.Sizeof(btfInt{})), nil + case kindPointer: + case kindArray: + return int(unsafe.Sizeof(btfArray{})), nil + case kindStruct: + fallthrough + case kindUnion: + return int(unsafe.Sizeof(btfMember{})) * bt.Vlen(), nil + case kindEnum: + return int(unsafe.Sizeof(btfEnum{})) * bt.Vlen(), nil + case kindForward: + case kindTypedef: + case kindVolatile: + case kindConst: + case kindRestrict: + case kindFunc: + case kindFuncProto: + return int(unsafe.Sizeof(btfParam{})) * bt.Vlen(), nil + case kindVar: + return int(unsafe.Sizeof(btfVariable{})), nil + case kindDatasec: + return int(unsafe.Sizeof(btfVarSecinfo{})) * bt.Vlen(), nil + case kindFloat: + case kindDeclTag: + return int(unsafe.Sizeof(btfDeclTag{})), nil + case kindTypeTag: + case kindEnum64: + return int(unsafe.Sizeof(btfEnum64{})) * bt.Vlen(), nil + default: + return 0, fmt.Errorf("unknown kind: %v", bt.Kind()) } - if rt.data == nil { - return nil - } - - return binary.Write(w, bo, rt.data) + return 0, nil } // btfInt encodes additional data for integers. @@ -375,21 +397,15 @@ type btfMember struct { var btfMemberLen = int(unsafe.Sizeof(btfMember{})) -func unmarshalBtfMembers(members []btfMember, b []byte, bo binary.ByteOrder) (int, error) { - off := 0 - for i := range members { - if off+btfMemberLen > len(b) { - return 0, fmt.Errorf("not enough bytes to unmarshal btfMember %d", i) - } - - members[i].NameOff = bo.Uint32(b[off+0:]) - members[i].Type = TypeID(bo.Uint32(b[off+4:])) - members[i].Offset = bo.Uint32(b[off+8:]) - - off += btfMemberLen +func unmarshalBtfMember(bm *btfMember, b []byte, bo binary.ByteOrder) (int, error) { + if btfMemberLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfMember") } - return off, nil + bm.NameOff = bo.Uint32(b[0:]) + bm.Type = TypeID(bo.Uint32(b[4:])) + bm.Offset = bo.Uint32(b[8:]) + return btfMemberLen, nil } type btfVarSecinfo struct { @@ -400,21 +416,15 @@ type btfVarSecinfo struct { var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{})) -func unmarshalBtfVarSecInfos(secinfos []btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { - off := 0 - for i := range secinfos { - if off+btfVarSecinfoLen > len(b) { - return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo %d", i) - } - - secinfos[i].Type = TypeID(bo.Uint32(b[off+0:])) - secinfos[i].Offset = bo.Uint32(b[off+4:]) - secinfos[i].Size = bo.Uint32(b[off+8:]) - - off += btfVarSecinfoLen +func unmarshalBtfVarSecInfo(bvsi *btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVarSecinfoLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo") } - return off, nil + bvsi.Type = TypeID(bo.Uint32(b[0:])) + bvsi.Offset = bo.Uint32(b[4:]) + bvsi.Size = bo.Uint32(b[8:]) + return btfVarSecinfoLen, nil } type btfVariable struct { @@ -439,20 +449,14 @@ type btfEnum struct { var btfEnumLen = int(unsafe.Sizeof(btfEnum{})) -func unmarshalBtfEnums(enums []btfEnum, b []byte, bo binary.ByteOrder) (int, error) { - off := 0 - for i := range enums { - if off+btfEnumLen > len(b) { - return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum %d", i) - } - - enums[i].NameOff = bo.Uint32(b[off+0:]) - enums[i].Val = bo.Uint32(b[off+4:]) - - off += btfEnumLen +func unmarshalBtfEnum(be *btfEnum, b []byte, bo binary.ByteOrder) (int, error) { + if btfEnumLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum") } - return off, nil + be.NameOff = bo.Uint32(b[0:]) + be.Val 
= bo.Uint32(b[4:]) + return btfEnumLen, nil } type btfEnum64 struct { @@ -463,21 +467,16 @@ type btfEnum64 struct { var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{})) -func unmarshalBtfEnums64(enums []btfEnum64, b []byte, bo binary.ByteOrder) (int, error) { - off := 0 - for i := range enums { - if off+btfEnum64Len > len(b) { - return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64 %d", i) - } - - enums[i].NameOff = bo.Uint32(b[off+0:]) - enums[i].ValLo32 = bo.Uint32(b[off+4:]) - enums[i].ValHi32 = bo.Uint32(b[off+8:]) - - off += btfEnum64Len +func unmarshalBtfEnum64(enum *btfEnum64, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfEnum64Len { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64") } - return off, nil + enum.NameOff = bo.Uint32(b[0:]) + enum.ValLo32 = bo.Uint32(b[4:]) + enum.ValHi32 = bo.Uint32(b[8:]) + + return btfEnum64Len, nil } type btfParam struct { @@ -487,20 +486,15 @@ type btfParam struct { var btfParamLen = int(unsafe.Sizeof(btfParam{})) -func unmarshalBtfParams(params []btfParam, b []byte, bo binary.ByteOrder) (int, error) { - off := 0 - for i := range params { - if off+btfParamLen > len(b) { - return 0, fmt.Errorf("not enough bytes to unmarshal btfParam %d", i) - } - - params[i].NameOff = bo.Uint32(b[off+0:]) - params[i].Type = TypeID(bo.Uint32(b[off+4:])) - - off += btfParamLen +func unmarshalBtfParam(param *btfParam, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfParamLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfParam") } - return off, nil + param.NameOff = bo.Uint32(b[0:]) + param.Type = TypeID(bo.Uint32(b[4:])) + + return btfParamLen, nil } type btfDeclTag struct { diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/core.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/core.go index ee89f98331a4..f128011dde70 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/core.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/core.go @@ -6,11 +6,12 @@ import ( "fmt" "math" "reflect" - "slices" "strconv" "strings" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" ) // Code in this file is derived from libbpf, which is available under a BSD @@ -46,6 +47,10 @@ func (f *COREFixup) String() string { } func (f *COREFixup) Apply(ins *asm.Instruction) error { + if !platform.IsLinux { + return fmt.Errorf("CO-RE fixup: %w", internal.ErrNotSupportedOnOS) + } + if f.poison { // Relocation is poisoned, replace the instruction with an invalid one. 
if ins.OpCode.IsDWordLoad() { @@ -205,8 +210,8 @@ func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveTargetTypeID := targets[0].TypeID for _, target := range targets { - if bo != target.imm.byteOrder { - return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder) + if bo != target.byteOrder { + return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder) } } @@ -259,16 +264,14 @@ func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, var targetTypes []Type for _, target := range targets { - namedTypeIDs := target.imm.namedTypes[essentialName] - targetTypes = slices.Grow(targetTypes, len(namedTypeIDs)) - for _, id := range namedTypeIDs { - typ, err := target.TypeByID(id) - if err != nil { - return nil, err - } - - targetTypes = append(targetTypes, typ) + namedTypes, err := target.TypesByName(essentialName) + if errors.Is(err, ErrNotFound) { + continue + } else if err != nil { + return nil, err } + + targetTypes = append(targetTypes, namedTypes...) } fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/ext_info.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/ext_info.go index eb9044badf23..6ff5e2b90daa 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/ext_info.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -16,8 +16,8 @@ import ( // ExtInfos contains ELF section metadata. type ExtInfos struct { // The slices are sorted by offset in ascending order. - funcInfos map[string]FuncInfos - lineInfos map[string]LineInfos + funcInfos map[string]FuncOffsets + lineInfos map[string]LineOffsets relocationInfos map[string]CORERelocationInfos } @@ -58,9 +58,9 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, er return nil, fmt.Errorf("parsing BTF function info: %w", err) } - funcInfos := make(map[string]FuncInfos, len(btfFuncInfos)) + funcInfos := make(map[string]FuncOffsets, len(btfFuncInfos)) for section, bfis := range btfFuncInfos { - funcInfos[section], err = newFuncInfos(bfis, spec) + funcInfos[section], err = newFuncOffsets(bfis, spec) if err != nil { return nil, fmt.Errorf("section %s: func infos: %w", section, err) } @@ -72,7 +72,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, er return nil, fmt.Errorf("parsing BTF line info: %w", err) } - lineInfos := make(map[string]LineInfos, len(btfLineInfos)) + lineInfos := make(map[string]LineOffsets, len(btfLineInfos)) for section, blis := range btfLineInfos { lineInfos[section], err = newLineInfos(blis, spec.strings) if err != nil { @@ -102,8 +102,10 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, er return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil } -type funcInfoMeta struct{} -type coreRelocationMeta struct{} +type ( + funcInfoMeta struct{} + coreRelocationMeta struct{} +) // Assign per-section metadata from BTF to a section's instructions. func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { @@ -117,20 +119,20 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { // Assign per-instruction metadata to the instructions in insns. 
func AssignMetadataToInstructions( insns asm.Instructions, - funcInfos FuncInfos, - lineInfos LineInfos, + funcInfos FuncOffsets, + lineInfos LineOffsets, reloInfos CORERelocationInfos, ) { iter := insns.Iterate() for iter.Next() { - if len(funcInfos.infos) > 0 && funcInfos.infos[0].offset == iter.Offset { - *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos.infos[0].fn) - funcInfos.infos = funcInfos.infos[1:] + if len(funcInfos) > 0 && funcInfos[0].Offset == iter.Offset { + *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].Func) + funcInfos = funcInfos[1:] } - if len(lineInfos.infos) > 0 && lineInfos.infos[0].offset == iter.Offset { - *iter.Ins = iter.Ins.WithSource(lineInfos.infos[0].line) - lineInfos.infos = lineInfos.infos[1:] + if len(lineInfos) > 0 && lineInfos[0].Offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos[0].Line) + lineInfos = lineInfos[1:] } if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset { @@ -159,9 +161,9 @@ marshal: var fiBuf, liBuf bytes.Buffer for { if fn := FuncMetadata(iter.Ins); fn != nil { - fi := &funcInfo{ - fn: fn, - offset: iter.Offset, + fi := &FuncOffset{ + Func: fn, + Offset: iter.Offset, } if err := fi.marshal(&fiBuf, b); err != nil { return nil, nil, fmt.Errorf("write func info: %w", err) @@ -178,9 +180,9 @@ marshal: } } - li := &lineInfo{ - line: line, - offset: iter.Offset, + li := &LineOffset{ + Offset: iter.Offset, + Line: line, } if err := li.marshal(&liBuf, b); err != nil { return nil, nil, fmt.Errorf("write line info: %w", err) @@ -333,17 +335,17 @@ func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { return recordSize, nil } -// FuncInfos contains a sorted list of func infos. -type FuncInfos struct { - infos []funcInfo -} +// FuncOffsets is a sorted slice of FuncOffset. +type FuncOffsets []FuncOffset // The size of a FuncInfo in BTF wire format. var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) -type funcInfo struct { - fn *Func - offset asm.RawInstructionOffset +// FuncOffset represents a [btf.Func] and its raw instruction offset within a +// BPF program. 
+type FuncOffset struct { + Offset asm.RawInstructionOffset + Func *Func } type bpfFuncInfo struct { @@ -352,7 +354,7 @@ type bpfFuncInfo struct { TypeID TypeID } -func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) { +func newFuncOffset(fi bpfFuncInfo, spec *Spec) (*FuncOffset, error) { typ, err := spec.TypeByID(fi.TypeID) if err != nil { return nil, err @@ -368,31 +370,32 @@ func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) { return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID) } - return &funcInfo{ - fn, + return &FuncOffset{ asm.RawInstructionOffset(fi.InsnOff), + fn, }, nil } -func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) (FuncInfos, error) { - fis := FuncInfos{ - infos: make([]funcInfo, 0, len(bfis)), - } +func newFuncOffsets(bfis []bpfFuncInfo, spec *Spec) (FuncOffsets, error) { + fos := make(FuncOffsets, 0, len(bfis)) + for _, bfi := range bfis { - fi, err := newFuncInfo(bfi, spec) + fi, err := newFuncOffset(bfi, spec) if err != nil { - return FuncInfos{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err) + return FuncOffsets{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err) } - fis.infos = append(fis.infos, *fi) + fos = append(fos, *fi) } - sort.Slice(fis.infos, func(i, j int) bool { - return fis.infos[i].offset <= fis.infos[j].offset + sort.Slice(fos, func(i, j int) bool { + return fos[i].Offset <= fos[j].Offset }) - return fis, nil + return fos, nil } -// LoadFuncInfos parses BTF func info in kernel wire format. -func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncInfos, error) { +// LoadFuncInfos parses BTF func info from kernel wire format into a +// [FuncOffsets], a sorted slice of [btf.Func]s of (sub)programs within a BPF +// program with their corresponding raw instruction offsets. +func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncOffsets, error) { fis, err := parseFuncInfoRecords( reader, bo, @@ -401,20 +404,20 @@ func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec false, ) if err != nil { - return FuncInfos{}, fmt.Errorf("parsing BTF func info: %w", err) + return FuncOffsets{}, fmt.Errorf("parsing BTF func info: %w", err) } - return newFuncInfos(fis, spec) + return newFuncOffsets(fis, spec) } // marshal into the BTF wire format. -func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error { - id, err := b.Add(fi.fn) +func (fi *FuncOffset) marshal(w *bytes.Buffer, b *Builder) error { + id, err := b.Add(fi.Func) if err != nil { return err } bfi := bpfFuncInfo{ - InsnOff: uint32(fi.offset), + InsnOff: uint32(fi.Offset), TypeID: id, } buf := make([]byte, FuncInfoSize) @@ -515,14 +518,13 @@ func (li *Line) String() string { return li.line } -// LineInfos contains a sorted list of line infos. -type LineInfos struct { - infos []lineInfo -} +// LineOffsets contains a sorted list of line infos. +type LineOffsets []LineOffset -type lineInfo struct { - line *Line - offset asm.RawInstructionOffset +// LineOffset represents a line info and its raw instruction offset. +type LineOffset struct { + Offset asm.RawInstructionOffset + Line *Line } // Constants for the format of bpfLineInfo.LineCol. @@ -541,7 +543,7 @@ type bpfLineInfo struct { } // LoadLineInfos parses BTF line info in kernel wire format. 
-func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineInfos, error) { +func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineOffsets, error) { lis, err := parseLineInfoRecords( reader, bo, @@ -550,57 +552,55 @@ func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec false, ) if err != nil { - return LineInfos{}, fmt.Errorf("parsing BTF line info: %w", err) + return LineOffsets{}, fmt.Errorf("parsing BTF line info: %w", err) } return newLineInfos(lis, spec.strings) } -func newLineInfo(li bpfLineInfo, strings *stringTable) (lineInfo, error) { - line, err := strings.Lookup(li.LineOff) +func newLineInfo(li bpfLineInfo, strings *stringTable) (LineOffset, error) { + line, err := strings.LookupCached(li.LineOff) if err != nil { - return lineInfo{}, fmt.Errorf("lookup of line: %w", err) + return LineOffset{}, fmt.Errorf("lookup of line: %w", err) } - fileName, err := strings.Lookup(li.FileNameOff) + fileName, err := strings.LookupCached(li.FileNameOff) if err != nil { - return lineInfo{}, fmt.Errorf("lookup of filename: %w", err) + return LineOffset{}, fmt.Errorf("lookup of filename: %w", err) } lineNumber := li.LineCol >> bpfLineShift lineColumn := li.LineCol & bpfColumnMax - return lineInfo{ + return LineOffset{ + asm.RawInstructionOffset(li.InsnOff), &Line{ fileName, line, lineNumber, lineColumn, }, - asm.RawInstructionOffset(li.InsnOff), }, nil } -func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineInfos, error) { - lis := LineInfos{ - infos: make([]lineInfo, 0, len(blis)), - } +func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineOffsets, error) { + lis := make([]LineOffset, 0, len(blis)) for _, bli := range blis { li, err := newLineInfo(bli, strings) if err != nil { - return LineInfos{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err) + return LineOffsets{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err) } - lis.infos = append(lis.infos, li) + lis = append(lis, li) } - sort.Slice(lis.infos, func(i, j int) bool { - return lis.infos[i].offset <= lis.infos[j].offset + sort.Slice(lis, func(i, j int) bool { + return lis[i].Offset <= lis[j].Offset }) return lis, nil } // marshal writes the binary representation of the LineInfo to w. -func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error { - line := li.line +func (li *LineOffset) marshal(w *bytes.Buffer, b *Builder) error { + line := li.Line if line.lineNumber > bpfLineMax { return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax) } @@ -620,7 +620,7 @@ func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error { } bli := bpfLineInfo{ - uint32(li.offset), + uint32(li.Offset), fileNameOff, lineOff, (line.lineNumber << bpfLineShift) | line.lineColumn, @@ -666,20 +666,19 @@ func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map // These records appear after a btf_ext_info_sec header in the line_info // sub-section of .BTF.ext. func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) { - var li bpfLineInfo - - if exp, got := uint32(binary.Size(li)), recordSize; exp != got { + if exp, got := uint32(binary.Size(bpfLineInfo{})), recordSize; exp != got { // BTF blob's record size is longer than we know how to parse. 
return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) } - out := make([]bpfLineInfo, 0, recordNum) - for i := uint32(0); i < recordNum; i++ { - if err := binary.Read(r, bo, &li); err != nil { - return nil, fmt.Errorf("can't read line info: %v", err) - } + out := make([]bpfLineInfo, recordNum) + if err := binary.Read(r, bo, out); err != nil { + return nil, fmt.Errorf("can't read line info: %v", err) + } - if offsetInBytes { + if offsetInBytes { + for i := range out { + li := &out[i] if li.InsnOff%asm.InstructionSize != 0 { return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) } @@ -688,8 +687,6 @@ func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, r // Convert as early as possible. li.InsnOff /= asm.InstructionSize } - - out = append(out, li) } return out, nil @@ -799,7 +796,7 @@ func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map return nil, err } - records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo) + records, err := parseCOREReloRecords(r, bo, infoHeader.NumInfo) if err != nil { return nil, fmt.Errorf("section %v: %w", secName, err) } @@ -811,7 +808,7 @@ func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map // parseCOREReloRecords parses a stream of CO-RE relocation entries into a // coreRelos. These records appear after a btf_ext_info_sec header in the // core_relos sub-section of .BTF.ext. -func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) { +func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordNum uint32) ([]bpfCORERelo, error) { var out []bpfCORERelo var relo bpfCORERelo diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/feature.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/feature.go index 6feb08dfbb0f..5b427f5d3501 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/feature.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/feature.go @@ -11,19 +11,19 @@ import ( // haveBTF attempts to load a BTF blob containing an Int. It should pass on any // kernel that supports BPF_BTF_LOAD. -var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error { +var haveBTF = internal.NewFeatureTest("BTF", func() error { // 0-length anonymous integer err := probeBTF(&Int{}) if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { return internal.ErrNotSupported } return err -}) +}, "4.18") // haveMapBTF attempts to load a minimal BTF blob containing a Var. It is // used as a proxy for .bss, .data and .rodata map support, which generally // come with a Var and Datasec. These were introduced in Linux 5.2. -var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error { +var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", func() error { if err := haveBTF(); err != nil { return err } @@ -40,12 +40,12 @@ var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() return internal.ErrNotSupported } return err -}) +}, "5.2") // haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It // is used as a proxy for ext_info (func_info) support, which depends on // Func(Proto) by definition. 
-var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error { +var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", func() error { if err := haveBTF(); err != nil { return err } @@ -60,9 +60,9 @@ var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", return internal.ErrNotSupported } return err -}) +}, "5.0") -var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error { +var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", func() error { if err := haveProgBTF(); err != nil { return err } @@ -78,9 +78,44 @@ var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() return internal.ErrNotSupported } return err -}) +}, "5.6") -var haveEnum64 = internal.NewFeatureTest("ENUM64", "6.0", func() error { +var haveDeclTags = internal.NewFeatureTest("BTF decl tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &Typedef{ + Name: "a", + Type: &Int{}, + Tags: []string{"a"}, + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.16") + +var haveTypeTags = internal.NewFeatureTest("BTF type tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &TypeTag{ + Type: &Int{}, + Value: "a", + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.17") + +var haveEnum64 = internal.NewFeatureTest("ENUM64", func() error { if err := haveBTF(); err != nil { return err } @@ -97,7 +132,7 @@ var haveEnum64 = internal.NewFeatureTest("ENUM64", "6.0", func() error { return internal.ErrNotSupported } return err -}) +}, "6.0") func probeBTF(typ Type) error { b, err := NewBuilder([]Type{typ}) @@ -111,7 +146,7 @@ func probeBTF(typ Type) error { } fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ - Btf: sys.NewSlicePointer(buf), + Btf: sys.SlicePointer(buf), BtfSize: uint32(len(buf)), }) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/format.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/format.go index 5e581b4a8518..7deca334aed5 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/format.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/format.go @@ -56,7 +56,7 @@ func (gf *GoFormatter) enumIdentifier(name, element string) string { // // It encodes https://golang.org/ref/spec#Type_declarations: // -// type foo struct { bar uint32; } +// type foo struct { _ structs.HostLayout; bar uint32; } // type bar int32 func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { if name == "" { @@ -114,7 +114,7 @@ func (gf *GoFormatter) writeType(typ Type, depth int) error { // // It encodes https://golang.org/ref/spec#TypeLit. 
// -// struct { bar uint32; } +// struct { _ structs.HostLayout; bar uint32; } // uint32 func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { depth++ @@ -161,6 +161,9 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { case *Datasec: err = gf.writeDatasecLit(v, depth) + case *Var: + err = gf.writeTypeLit(v.Type, depth) + default: return fmt.Errorf("type %T: %w", v, ErrNotSupported) } @@ -205,7 +208,7 @@ func (gf *GoFormatter) writeIntLit(i *Int) error { } func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { - gf.w.WriteString("struct { ") + gf.w.WriteString("struct { _ structs.HostLayout; ") prevOffset := uint32(0) skippedBitfield := false @@ -295,7 +298,7 @@ func (gf *GoFormatter) writeStructField(m Member, depth int) error { } func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { - gf.w.WriteString("struct { ") + gf.w.WriteString("struct { _ structs.HostLayout; ") prevOffset := uint32(0) for i, vsi := range ds.Vars { diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/handle.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/handle.go index adfa6fed4bc3..2e483e9d90c4 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/handle.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/handle.go @@ -8,6 +8,7 @@ import ( "os" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -43,12 +44,16 @@ func NewHandle(b *Builder) (*Handle, error) { func NewHandleFromRawBTF(btf []byte) (*Handle, error) { const minLogSize = 64 * 1024 + if platform.IsWindows { + return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS) + } + if uint64(len(btf)) > math.MaxUint32 { return nil, errors.New("BTF exceeds the maximum size") } attr := &sys.BtfLoadAttr{ - Btf: sys.NewSlicePointer(btf), + Btf: sys.SlicePointer(btf), BtfSize: uint32(len(btf)), } @@ -91,7 +96,7 @@ func NewHandleFromRawBTF(btf []byte) (*Handle, error) { logBuf = make([]byte, logSize) attr.BtfLogSize = logSize - attr.BtfLogBuf = sys.NewSlicePointer(logBuf) + attr.BtfLogBuf = sys.SlicePointer(logBuf) attr.BtfLogLevel = 1 } @@ -110,6 +115,10 @@ func NewHandleFromRawBTF(btf []byte) (*Handle, error) { // // Requires CAP_SYS_ADMIN. func NewHandleFromID(id ID) (*Handle, error) { + if platform.IsWindows { + return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS) + } + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ Id: uint32(id), }) @@ -133,7 +142,8 @@ func NewHandleFromID(id ID) (*Handle, error) { func (h *Handle) Spec(base *Spec) (*Spec, error) { var btfInfo sys.BtfInfo btfBuffer := make([]byte, h.size) - btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) + btfInfo.Btf = sys.SlicePointer(btfBuffer) + btfInfo.BtfSize = uint32(len(btfBuffer)) if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { return nil, err @@ -204,7 +214,8 @@ func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) { btfInfo.BtfSize = 0 nameBuffer := make([]byte, btfInfo.NameLen) - btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer) + btfInfo.Name = sys.SlicePointer(nameBuffer) + btfInfo.NameLen = uint32(len(nameBuffer)) if err := sys.ObjInfo(fd, &btfInfo); err != nil { return nil, err } @@ -242,6 +253,11 @@ type HandleIterator struct { // Returns true if another BTF object was found. 
Call [HandleIterator.Err] after // the function returns false. func (it *HandleIterator) Next() bool { + if platform.IsWindows { + it.err = fmt.Errorf("btf: %w", internal.ErrNotSupportedOnOS) + return false + } + id := it.ID for { attr := &sys.BtfGetNextIdAttr{Id: id} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/kernel.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/kernel.go index 8584ebcb932a..fcbe650ce597 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/kernel.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/kernel.go @@ -5,13 +5,17 @@ import ( "fmt" "os" "path/filepath" + "slices" + "sort" "sync" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" ) -var kernelBTF = struct { +// globalCache amortises decoding BTF across all users of the library. +var globalCache = struct { sync.RWMutex kernel *Spec modules map[string]*Spec @@ -21,99 +25,119 @@ var kernelBTF = struct { // FlushKernelSpec removes any cached kernel type information. func FlushKernelSpec() { - kallsyms.FlushKernelModuleCache() + globalCache.Lock() + defer globalCache.Unlock() - kernelBTF.Lock() - defer kernelBTF.Unlock() - - kernelBTF.kernel = nil - kernelBTF.modules = make(map[string]*Spec) + globalCache.kernel = nil + globalCache.modules = make(map[string]*Spec) } // LoadKernelSpec returns the current kernel's BTF information. // // Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system // for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled. +// +// Consider using [Cache] instead. func LoadKernelSpec() (*Spec, error) { - kernelBTF.RLock() - spec := kernelBTF.kernel - kernelBTF.RUnlock() - - if spec == nil { - kernelBTF.Lock() - defer kernelBTF.Unlock() + spec, err := loadCachedKernelSpec() + return spec.Copy(), err +} - spec = kernelBTF.kernel - } +// load (and cache) the kernel spec. +// +// Does not copy Spec. +func loadCachedKernelSpec() (*Spec, error) { + globalCache.RLock() + spec := globalCache.kernel + globalCache.RUnlock() if spec != nil { - return spec.Copy(), nil + return spec, nil } - spec, _, err := loadKernelSpec() + globalCache.Lock() + defer globalCache.Unlock() + + spec, err := loadKernelSpec() if err != nil { return nil, err } - kernelBTF.kernel = spec - return spec.Copy(), nil + globalCache.kernel = spec + return spec, nil } // LoadKernelModuleSpec returns the BTF information for the named kernel module. // +// Using [Cache.Module] is faster when loading BTF for more than one module. +// // Defaults to /sys/kernel/btf/. // Returns an error wrapping ErrNotSupported if BTF is not enabled. // Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist. func LoadKernelModuleSpec(module string) (*Spec, error) { - kernelBTF.RLock() - spec := kernelBTF.modules[module] - kernelBTF.RUnlock() + spec, err := loadCachedKernelModuleSpec(module) + return spec.Copy(), err +} + +// load (and cache) a module spec. +// +// Does not copy Spec. 
+func loadCachedKernelModuleSpec(module string) (*Spec, error) { + globalCache.RLock() + spec := globalCache.modules[module] + globalCache.RUnlock() if spec != nil { - return spec.Copy(), nil + return spec, nil } - base, err := LoadKernelSpec() + base, err := loadCachedKernelSpec() if err != nil { - return nil, fmt.Errorf("load kernel spec: %w", err) + return nil, err } - kernelBTF.Lock() - defer kernelBTF.Unlock() - - if spec = kernelBTF.modules[module]; spec != nil { - return spec.Copy(), nil - } + // NB: This only allows a single module to be parsed at a time. Not sure + // it makes a difference. + globalCache.Lock() + defer globalCache.Unlock() spec, err = loadKernelModuleSpec(module, base) if err != nil { return nil, err } - kernelBTF.modules[module] = spec - return spec.Copy(), nil + globalCache.modules[module] = spec + return spec, nil } -func loadKernelSpec() (_ *Spec, fallback bool, _ error) { +func loadKernelSpec() (_ *Spec, _ error) { + if platform.IsWindows { + return nil, internal.ErrNotSupportedOnOS + } + fh, err := os.Open("/sys/kernel/btf/vmlinux") if err == nil { defer fh.Close() spec, err := loadRawSpec(fh, internal.NativeEndian, nil) - return spec, false, err + return spec, err } file, err := findVMLinux() if err != nil { - return nil, false, err + return nil, err } defer file.Close() spec, err := LoadSpecFromReader(file) - return spec, true, err + return spec, err } func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) { + if platform.IsWindows { + return nil, internal.ErrNotSupportedOnOS + } + dir, file := filepath.Split(module) if dir != "" || filepath.Ext(file) != "" { return nil, fmt.Errorf("invalid module name %q", module) @@ -130,7 +154,11 @@ func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) { // findVMLinux scans multiple well-known paths for vmlinux kernel images. func findVMLinux() (*os.File, error) { - release, err := internal.KernelRelease() + if platform.IsWindows { + return nil, fmt.Errorf("find vmlinux: %w", internal.ErrNotSupportedOnOS) + } + + release, err := linux.KernelRelease() if err != nil { return nil, err } @@ -157,3 +185,114 @@ func findVMLinux() (*os.File, error) { return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) } + +// Cache allows to amortise the cost of decoding BTF across multiple call-sites. +// +// It is not safe for concurrent use. +type Cache struct { + KernelTypes *Spec + ModuleTypes map[string]*Spec + LoadedModules []string +} + +// NewCache creates a new Cache. +// +// Opportunistically reuses a global cache if possible. +func NewCache() *Cache { + globalCache.RLock() + defer globalCache.RUnlock() + + // This copy is either a no-op or very cheap, since the spec won't contain + // any inflated types. + kernel := globalCache.kernel.Copy() + if kernel == nil { + return &Cache{} + } + + modules := make(map[string]*Spec, len(globalCache.modules)) + for name, spec := range globalCache.modules { + decoder, _ := rebaseDecoder(spec.decoder, kernel.decoder) + // NB: Kernel module BTF can't contain ELF fixups because it is always + // read from sysfs. + modules[name] = &Spec{decoder: decoder} + } + + if len(modules) == 0 { + return &Cache{kernel, nil, nil} + } + + return &Cache{kernel, modules, nil} +} + +// Kernel is equivalent to [LoadKernelSpec], except that repeated calls do +// not copy the Spec. 
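//
// A minimal usage sketch (module name illustrative, error handling elided):
//
//	cache := btf.NewCache()
//	kernel, _ := cache.Kernel()         // shared kernel Spec, decoded once
//	nft, _ := cache.Module("nf_tables") // module Spec sharing the kernel base
//	names, _ := cache.Modules()         // sorted module names from /sys/kernel/btf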
+func (c *Cache) Kernel() (*Spec, error) { + if c.KernelTypes != nil { + return c.KernelTypes, nil + } + + var err error + c.KernelTypes, err = LoadKernelSpec() + return c.KernelTypes, err +} + +// Module is equivalent to [LoadKernelModuleSpec], except that repeated calls do +// not copy the spec. +// +// All modules also share the return value of [Kernel] as their base. +func (c *Cache) Module(name string) (*Spec, error) { + if spec := c.ModuleTypes[name]; spec != nil { + return spec, nil + } + + if c.ModuleTypes == nil { + c.ModuleTypes = make(map[string]*Spec) + } + + base, err := c.Kernel() + if err != nil { + return nil, err + } + + spec, err := loadCachedKernelModuleSpec(name) + if err != nil { + return nil, err + } + + // Important: base is shared between modules. This allows inflating common + // types only once. + decoder, err := rebaseDecoder(spec.decoder, base.decoder) + if err != nil { + return nil, err + } + + spec = &Spec{decoder: decoder} + c.ModuleTypes[name] = spec + return spec, err +} + +// Modules returns a sorted list of all loaded modules. +func (c *Cache) Modules() ([]string, error) { + if c.LoadedModules != nil { + return c.LoadedModules, nil + } + + btfDir, err := os.Open("/sys/kernel/btf") + if err != nil { + return nil, err + } + defer btfDir.Close() + + entries, err := btfDir.Readdirnames(-1) + if err != nil { + return nil, err + } + + entries = slices.DeleteFunc(entries, func(s string) bool { + return s == "vmlinux" + }) + + sort.Strings(entries) + c.LoadedModules = entries + return entries, nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/marshal.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/marshal.go index f14cfa6e9735..308ce8d34786 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/marshal.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/marshal.go @@ -1,7 +1,6 @@ package btf import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -9,6 +8,7 @@ import ( "math" "slices" "sync" + "unsafe" "github.com/cilium/ebpf/internal" ) @@ -18,6 +18,10 @@ type MarshalOptions struct { Order binary.ByteOrder // Remove function linkage information for compatibility with <5.6 kernels. StripFuncLinkage bool + // Replace decl tags with a placeholder for compatibility with <5.16 kernels. + ReplaceDeclTags bool + // Replace TypeTags with a placeholder for compatibility with <5.17 kernels. + ReplaceTypeTags bool // Replace Enum64 with a placeholder for compatibility with <6.0 kernels. ReplaceEnum64 bool // Prevent the "No type found" error when loading BTF without any types. @@ -29,6 +33,8 @@ func KernelMarshalOptions() *MarshalOptions { return &MarshalOptions{ Order: internal.NativeEndian, StripFuncLinkage: haveFuncLinkage() != nil, + ReplaceDeclTags: haveDeclTags() != nil, + ReplaceTypeTags: haveTypeTags() != nil, ReplaceEnum64: haveEnum64() != nil, PreventNoTypeFound: true, // All current kernels require this. } @@ -39,7 +45,6 @@ type encoder struct { MarshalOptions pending internal.Deque[Type] - buf *bytes.Buffer strings *stringTableBuilder ids map[Type]TypeID visited map[Type]struct{} @@ -160,12 +165,8 @@ func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { // Reserve space for the BTF header. 
buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen] - w := internal.NewBuffer(buf) - defer internal.PutBuffer(w) - e := encoder{ MarshalOptions: *opts, - buf: w, strings: stb, lastID: TypeID(len(b.types)), visited: make(map[Type]struct{}, len(b.types)), @@ -193,15 +194,16 @@ func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { e.pending.Push(typ) } - if err := e.deflatePending(); err != nil { + buf, err := e.deflatePending(buf) + if err != nil { return nil, err } - length := e.buf.Len() + length := len(buf) typeLen := uint32(length - btfHeaderLen) stringLen := e.strings.Length() - buf = e.strings.AppendEncoded(e.buf.Bytes()) + buf = e.strings.AppendEncoded(buf) // Fill out the header, and write it out. header := &btfHeader{ @@ -215,7 +217,7 @@ func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { StringLen: uint32(stringLen), } - err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header) + _, err = binary.Encode(buf[:btfHeaderLen], e.Order, header) if err != nil { return nil, fmt.Errorf("write header: %v", err) } @@ -237,28 +239,27 @@ func (b *Builder) addString(str string) (uint32, error) { return b.strings.Add(str) } -func (e *encoder) allocateIDs(root Type) (err error) { - visitInPostorder(root, e.visited, func(typ Type) bool { +func (e *encoder) allocateIDs(root Type) error { + for typ := range postorder(root, e.visited) { if _, ok := typ.(*Void); ok { - return true + continue } if _, ok := e.ids[typ]; ok { - return true + continue } id := e.lastID + 1 if id < e.lastID { - err = errors.New("type ID overflow") - return false + return errors.New("type ID overflow") } e.pending.Push(typ) e.ids[typ] = id e.lastID = id - return true - }) - return + } + + return nil } // id returns the ID for the given type or panics with an error. @@ -275,7 +276,7 @@ func (e *encoder) id(typ Type) TypeID { return id } -func (e *encoder) deflatePending() error { +func (e *encoder) deflatePending(buf []byte) ([]byte, error) { // Declare root outside of the loop to avoid repeated heap allocations. var root Type @@ -283,20 +284,22 @@ func (e *encoder) deflatePending() error { root = e.pending.Shift() // Allocate IDs for all children of typ, including transitive dependencies. - if err := e.allocateIDs(root); err != nil { - return err + err := e.allocateIDs(root) + if err != nil { + return nil, err } - if err := e.deflateType(root); err != nil { + buf, err = e.deflateType(buf, root) + if err != nil { id := e.ids[root] - return fmt.Errorf("deflate %v with ID %d: %w", root, id, err) + return nil, fmt.Errorf("deflate %v with ID %d: %w", root, id, err) } } - return nil + return buf, nil } -func (e *encoder) deflateType(typ Type) (err error) { +func (e *encoder) deflateType(buf []byte, typ Type) (_ []byte, err error) { defer func() { if r := recover(); r != nil { var ok bool @@ -307,26 +310,22 @@ func (e *encoder) deflateType(typ Type) (err error) { } }() - var raw rawType + var raw btfType raw.NameOff, err = e.strings.Add(typ.TypeName()) if err != nil { - return err + return nil, err } + // Reserve space for the btfType header. + start := len(buf) + buf = append(buf, make([]byte, unsafe.Sizeof(raw))...) 
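	// The header fields are only known once the switch below has run, so the
	// reserved bytes are back-filled afterwards via raw.Encode(buf[start:...], e.Order).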
+ switch v := typ.(type) { case *Void: - return errors.New("Void is implicit in BTF wire format") + return nil, errors.New("Void is implicit in BTF wire format") case *Int: - raw.SetKind(kindInt) - raw.SetSize(v.Size) - - var bi btfInt - bi.SetEncoding(v.Encoding) - // We need to set bits in addition to size, since btf_type_int_is_regular - // otherwise flags this as a bitfield. - bi.SetBits(byte(v.Size) * 8) - raw.data = bi + buf, err = e.deflateInt(buf, &raw, v) case *Pointer: raw.SetKind(kindPointer) @@ -334,25 +333,25 @@ func (e *encoder) deflateType(typ Type) (err error) { case *Array: raw.SetKind(kindArray) - raw.data = &btfArray{ + buf, err = binary.Append(buf, e.Order, &btfArray{ e.id(v.Type), e.id(v.Index), v.Nelems, - } + }) case *Struct: raw.SetKind(kindStruct) raw.SetSize(v.Size) - raw.data, err = e.convertMembers(&raw.btfType, v.Members) + buf, err = e.deflateMembers(buf, &raw, v.Members) case *Union: - err = e.deflateUnion(&raw, v) + buf, err = e.deflateUnion(buf, &raw, v) case *Enum: if v.Size == 8 { - err = e.deflateEnum64(&raw, v) + buf, err = e.deflateEnum64(buf, &raw, v) } else { - err = e.deflateEnum(&raw, v) + buf, err = e.deflateEnum(buf, &raw, v) } case *Fwd: @@ -368,8 +367,7 @@ func (e *encoder) deflateType(typ Type) (err error) { raw.SetType(e.id(v.Type)) case *Const: - raw.SetKind(kindConst) - raw.SetType(e.id(v.Type)) + e.deflateConst(&raw, v) case *Restrict: raw.SetKind(kindRestrict) @@ -386,55 +384,114 @@ func (e *encoder) deflateType(typ Type) (err error) { raw.SetKind(kindFuncProto) raw.SetType(e.id(v.Return)) raw.SetVlen(len(v.Params)) - raw.data, err = e.deflateFuncParams(v.Params) + buf, err = e.deflateFuncParams(buf, v.Params) case *Var: raw.SetKind(kindVar) raw.SetType(e.id(v.Type)) - raw.data = btfVariable{uint32(v.Linkage)} + buf, err = binary.Append(buf, e.Order, btfVariable{uint32(v.Linkage)}) case *Datasec: raw.SetKind(kindDatasec) raw.SetSize(v.Size) raw.SetVlen(len(v.Vars)) - raw.data = e.deflateVarSecinfos(v.Vars) + buf, err = e.deflateVarSecinfos(buf, v.Vars) case *Float: raw.SetKind(kindFloat) raw.SetSize(v.Size) case *declTag: - raw.SetKind(kindDeclTag) - raw.SetType(e.id(v.Type)) - raw.data = &btfDeclTag{uint32(v.Index)} - raw.NameOff, err = e.strings.Add(v.Value) + buf, err = e.deflateDeclTag(buf, &raw, v) - case *typeTag: - raw.SetKind(kindTypeTag) - raw.SetType(e.id(v.Type)) - raw.NameOff, err = e.strings.Add(v.Value) + case *TypeTag: + err = e.deflateTypeTag(&raw, v) default: - return fmt.Errorf("don't know how to deflate %T", v) + return nil, fmt.Errorf("don't know how to deflate %T", v) } if err != nil { - return err + return nil, err + } + + header := buf[start : start+int(unsafe.Sizeof(raw))] + if _, err = raw.Encode(header, e.Order); err != nil { + return nil, err + } + + return buf, nil +} + +func (e *encoder) deflateInt(buf []byte, raw *btfType, i *Int) ([]byte, error) { + raw.SetKind(kindInt) + raw.SetSize(i.Size) + + var bi btfInt + bi.SetEncoding(i.Encoding) + // We need to set bits in addition to size, since btf_type_int_is_regular + // otherwise flags this as a bitfield. + bi.SetBits(byte(i.Size) * 8) + return binary.Append(buf, e.Order, bi) +} + +func (e *encoder) deflateDeclTag(buf []byte, raw *btfType, tag *declTag) ([]byte, error) { + // Replace a decl tag with an integer for compatibility with <5.16 kernels, + // following libbpf behaviour. 
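	// The replacement is controlled by MarshalOptions; a caller targeting older
	// kernels might set the compatibility knobs explicitly (sketch, given some
	// types []btf.Type built by the caller):
	//
	//	b, _ := btf.NewBuilder(types)
	//	blob, err := b.Marshal(nil, &btf.MarshalOptions{
	//		Order:           binary.LittleEndian,
	//		ReplaceDeclTags: true, // <5.16
	//		ReplaceTypeTags: true, // <5.17
	//		ReplaceEnum64:   true, // <6.0
	//	})
	//
	// KernelMarshalOptions derives the same flags from feature probes of the
	// running kernel.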
+ if e.ReplaceDeclTags { + typ := &Int{"decl_tag_placeholder", 1, Unsigned} + buf, err := e.deflateInt(buf, raw, typ) + if err != nil { + return nil, err + } + + // Add the placeholder type name to the string table. The encoder added the + // original type name before this call. + raw.NameOff, err = e.strings.Add(typ.TypeName()) + return buf, err + } + + var err error + raw.SetKind(kindDeclTag) + raw.SetType(e.id(tag.Type)) + raw.NameOff, err = e.strings.Add(tag.Value) + if err != nil { + return nil, err + } + + return binary.Append(buf, e.Order, btfDeclTag{uint32(tag.Index)}) +} + +func (e *encoder) deflateConst(raw *btfType, c *Const) { + raw.SetKind(kindConst) + raw.SetType(e.id(c.Type)) +} + +func (e *encoder) deflateTypeTag(raw *btfType, tag *TypeTag) (err error) { + // Replace a type tag with a const qualifier for compatibility with <5.17 + // kernels, following libbpf behaviour. + if e.ReplaceTypeTags { + e.deflateConst(raw, &Const{tag.Type}) + return nil } - return raw.Marshal(e.buf, e.Order) + raw.SetKind(kindTypeTag) + raw.SetType(e.id(tag.Type)) + raw.NameOff, err = e.strings.Add(tag.Value) + return } -func (e *encoder) deflateUnion(raw *rawType, union *Union) (err error) { +func (e *encoder) deflateUnion(buf []byte, raw *btfType, union *Union) ([]byte, error) { raw.SetKind(kindUnion) raw.SetSize(union.Size) - raw.data, err = e.convertMembers(&raw.btfType, union.Members) - return + return e.deflateMembers(buf, raw, union.Members) } -func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) { - bms := make([]btfMember, 0, len(members)) +func (e *encoder) deflateMembers(buf []byte, header *btfType, members []Member) ([]byte, error) { + var bm btfMember isBitfield := false + + buf = slices.Grow(buf, len(members)*int(unsafe.Sizeof(bm))) for _, member := range members { isBitfield = isBitfield || member.BitfieldSize > 0 @@ -448,30 +505,35 @@ func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember return nil, err } - bms = append(bms, btfMember{ + bm = btfMember{ nameOff, e.id(member.Type), uint32(offset), - }) + } + + buf, err = binary.Append(buf, e.Order, &bm) + if err != nil { + return nil, err + } } header.SetVlen(len(members)) header.SetBitfield(isBitfield) - return bms, nil + return buf, nil } -func (e *encoder) deflateEnum(raw *rawType, enum *Enum) (err error) { +func (e *encoder) deflateEnum(buf []byte, raw *btfType, enum *Enum) ([]byte, error) { raw.SetKind(kindEnum) raw.SetSize(enum.Size) raw.SetVlen(len(enum.Values)) // Signedness appeared together with ENUM64 support. 
raw.SetSigned(enum.Signed && !e.ReplaceEnum64) - raw.data, err = e.deflateEnumValues(enum) - return + return e.deflateEnumValues(buf, enum) } -func (e *encoder) deflateEnumValues(enum *Enum) ([]btfEnum, error) { - bes := make([]btfEnum, 0, len(enum.Values)) +func (e *encoder) deflateEnumValues(buf []byte, enum *Enum) ([]byte, error) { + var be btfEnum + buf = slices.Grow(buf, len(enum.Values)*int(unsafe.Sizeof(be))) for _, value := range enum.Values { nameOff, err := e.strings.Add(value.Name) if err != nil { @@ -488,16 +550,21 @@ func (e *encoder) deflateEnumValues(enum *Enum) ([]btfEnum, error) { } } - bes = append(bes, btfEnum{ + be = btfEnum{ nameOff, uint32(value.Value), - }) + } + + buf, err = binary.Append(buf, e.Order, &be) + if err != nil { + return nil, err + } } - return bes, nil + return buf, nil } -func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) { +func (e *encoder) deflateEnum64(buf []byte, raw *btfType, enum *Enum) ([]byte, error) { if e.ReplaceEnum64 { // Replace the ENUM64 with a union of fields with the correct size. // This matches libbpf behaviour on purpose. @@ -510,7 +577,7 @@ func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) { placeholder.Encoding = Signed } if err := e.allocateIDs(placeholder); err != nil { - return fmt.Errorf("add enum64 placeholder: %w", err) + return nil, fmt.Errorf("add enum64 placeholder: %w", err) } members := make([]Member, 0, len(enum.Values)) @@ -521,61 +588,79 @@ func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) { }) } - return e.deflateUnion(raw, &Union{enum.Name, enum.Size, members}) + return e.deflateUnion(buf, raw, &Union{enum.Name, enum.Size, members, nil}) } raw.SetKind(kindEnum64) raw.SetSize(enum.Size) raw.SetVlen(len(enum.Values)) raw.SetSigned(enum.Signed) - raw.data, err = e.deflateEnum64Values(enum.Values) - return + return e.deflateEnum64Values(buf, enum.Values) } -func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) { - bes := make([]btfEnum64, 0, len(values)) +func (e *encoder) deflateEnum64Values(buf []byte, values []EnumValue) ([]byte, error) { + var be btfEnum64 + buf = slices.Grow(buf, len(values)*int(unsafe.Sizeof(be))) for _, value := range values { nameOff, err := e.strings.Add(value.Name) if err != nil { return nil, err } - bes = append(bes, btfEnum64{ + be = btfEnum64{ nameOff, uint32(value.Value), uint32(value.Value >> 32), - }) + } + + buf, err = binary.Append(buf, e.Order, &be) + if err != nil { + return nil, err + } } - return bes, nil + return buf, nil } -func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) { - bps := make([]btfParam, 0, len(params)) +func (e *encoder) deflateFuncParams(buf []byte, params []FuncParam) ([]byte, error) { + var bp btfParam + buf = slices.Grow(buf, len(params)*int(unsafe.Sizeof(bp))) for _, param := range params { nameOff, err := e.strings.Add(param.Name) if err != nil { return nil, err } - bps = append(bps, btfParam{ + bp = btfParam{ nameOff, e.id(param.Type), - }) + } + + buf, err = binary.Append(buf, e.Order, &bp) + if err != nil { + return nil, err + } } - return bps, nil + return buf, nil } -func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo { - vsis := make([]btfVarSecinfo, 0, len(vars)) +func (e *encoder) deflateVarSecinfos(buf []byte, vars []VarSecinfo) ([]byte, error) { + var vsi btfVarSecinfo + var err error + buf = slices.Grow(buf, len(vars)*int(unsafe.Sizeof(vsi))) for _, v := range vars { - vsis = append(vsis, btfVarSecinfo{ + vsi = 
btfVarSecinfo{ e.id(v.Type), v.Offset, v.Size, - }) + } + + buf, err = binary.Append(buf, e.Order, vsi) + if err != nil { + return nil, err + } } - return vsis + return buf, nil } // MarshalMapKV creates a BTF object containing a map key and value. diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/strings.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/strings.go index 7c31461c3064..d09151c0eee9 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/strings.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/strings.go @@ -1,21 +1,24 @@ package btf import ( - "bufio" "bytes" "errors" "fmt" "io" "maps" - "slices" "strings" + "sync" ) +// stringTable is contains a sequence of null-terminated strings. +// +// It is safe for concurrent use. type stringTable struct { - base *stringTable - offsets []uint32 - prevIdx int - strings []string + base *stringTable + bytes []byte + + mu sync.Mutex + cache map[uint32]string } // sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. @@ -29,89 +32,94 @@ func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { // from the last entry offset of the base BTF. firstStringOffset := uint32(0) if base != nil { - idx := len(base.offsets) - 1 - firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1 + firstStringOffset = uint32(len(base.bytes)) } - // Derived from vmlinux BTF. - const averageStringLength = 16 - - n := int(r.Size() / averageStringLength) - offsets := make([]uint32, 0, n) - strings := make([]string, 0, n) - - offset := firstStringOffset - scanner := bufio.NewScanner(r) - scanner.Split(splitNull) - for scanner.Scan() { - str := scanner.Text() - offsets = append(offsets, offset) - strings = append(strings, str) - offset += uint32(len(str)) + 1 - } - if err := scanner.Err(); err != nil { + bytes := make([]byte, r.Size()) + if _, err := io.ReadFull(r, bytes); err != nil { return nil, err } - if len(strings) == 0 { + if len(bytes) == 0 { return nil, errors.New("string table is empty") } - if firstStringOffset == 0 && strings[0] != "" { - return nil, errors.New("first item in string table is non-empty") + if bytes[len(bytes)-1] != 0 { + return nil, errors.New("string table isn't null terminated") } - return &stringTable{base, offsets, 0, strings}, nil -} - -func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) { - i := bytes.IndexByte(data, 0) - if i == -1 { - if atEOF && len(data) > 0 { - return 0, nil, errors.New("string table isn't null terminated") - } - return 0, nil, nil + if firstStringOffset == 0 && bytes[0] != 0 { + return nil, errors.New("first item in string table is non-empty") } - return i + 1, data[:i], nil + return &stringTable{base: base, bytes: bytes}, nil } func (st *stringTable) Lookup(offset uint32) (string, error) { - if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] { - return st.base.lookup(offset) + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 { + return "", nil } - return st.lookup(offset) + + b, err := st.lookupSlow(offset) + return string(b), err } -func (st *stringTable) lookup(offset uint32) (string, error) { +func (st *stringTable) LookupBytes(offset uint32) ([]byte, error) { // Fast path: zero offset is the empty string, looked up frequently. 
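	// The table is one blob of NUL-terminated strings that always begins with a
	// NUL byte, e.g. "\x00foo\x00bar\x00": offset 0 is "", offset 1 is "foo",
	// offset 5 is "bar". Offsets into a split table are relative to the end of
	// the base table.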
- if offset == 0 && st.base == nil { - return "", nil + if offset == 0 { + return nil, nil } - // Accesses tend to be globally increasing, so check if the next string is - // the one we want. This skips the binary search in about 50% of cases. - if st.prevIdx+1 < len(st.offsets) && st.offsets[st.prevIdx+1] == offset { - st.prevIdx++ - return st.strings[st.prevIdx], nil + return st.lookupSlow(offset) +} + +func (st *stringTable) lookupSlow(offset uint32) ([]byte, error) { + if st.base != nil { + n := uint32(len(st.base.bytes)) + if offset < n { + return st.base.lookupSlow(offset) + } + offset -= n } - i, found := slices.BinarySearch(st.offsets, offset) - if !found { - return "", fmt.Errorf("offset %d isn't start of a string", offset) + if offset > uint32(len(st.bytes)) { + return nil, fmt.Errorf("offset %d is out of bounds of string table", offset) } - // Set the new increment index, but only if its greater than the current. - if i > st.prevIdx+1 { - st.prevIdx = i + if offset > 0 && st.bytes[offset-1] != 0 { + return nil, fmt.Errorf("offset %d is not the beginning of a string", offset) } - return st.strings[i], nil + i := bytes.IndexByte(st.bytes[offset:], 0) + return st.bytes[offset : offset+uint32(i)], nil } -// Num returns the number of strings in the table. -func (st *stringTable) Num() int { - return len(st.strings) +// LookupCache returns the string at the given offset, caching the result +// for future lookups. +func (cst *stringTable) LookupCached(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 { + return "", nil + } + + cst.mu.Lock() + defer cst.mu.Unlock() + + if str, ok := cst.cache[offset]; ok { + return str, nil + } + + str, err := cst.Lookup(offset) + if err != nil { + return "", err + } + + if cst.cache == nil { + cst.cache = make(map[uint32]string) + } + cst.cache[offset] = str + return str, nil } // stringTableBuilder builds BTF string tables. diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/traversal.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/traversal.go index c39dc66e46ce..57c1dc27e84e 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/traversal.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/traversal.go @@ -2,16 +2,21 @@ package btf import ( "fmt" + "iter" ) // Functions to traverse a cyclic graph of types. The below was very useful: // https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order -// Visit all types reachable from root in postorder. -// -// Traversal stops if yield returns false. -// -// Returns false if traversal was aborted. +// postorder yields all types reachable from root in post order. +func postorder(root Type, visited map[Type]struct{}) iter.Seq[Type] { + return func(yield func(Type) bool) { + visitInPostorder(root, visited, yield) + } +} + +// visitInPostorder is a separate function to avoid arguments escaping +// to the heap. Don't change the setup without re-running the benchmarks. 
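//
// Callers range over the returned sequence (Go 1.23 range-over-func), as
// allocateIDs in marshal.go now does; a sketch:
//
//	visited := make(map[Type]struct{})
//	for typ := range postorder(root, visited) {
//		// typ is yielded only after all of its children.
//	}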
func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool { if _, ok := visited[root]; ok { return true @@ -21,103 +26,134 @@ func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) } visited[root] = struct{}{} - cont := children(root, func(child *Type) bool { - return visitInPostorder(*child, visited, yield) - }) - if !cont { - return false + for child := range children(root) { + if !visitInPostorder(*child, visited, yield) { + return false + } } return yield(root) } -// children calls yield on each child of typ. -// -// Traversal stops if yield returns false. -// -// Returns false if traversal was aborted. -func children(typ Type, yield func(child *Type) bool) bool { - // Explicitly type switch on the most common types to allow the inliner to - // do its work. This avoids allocating intermediate slices from walk() on - // the heap. - switch v := typ.(type) { - case *Void, *Int, *Enum, *Fwd, *Float: - // No children to traverse. - case *Pointer: - if !yield(&v.Target) { - return false - } - case *Array: - if !yield(&v.Index) { - return false - } - if !yield(&v.Type) { - return false - } - case *Struct: - for i := range v.Members { - if !yield(&v.Members[i].Type) { - return false +// children yields all direct descendants of typ. +func children(typ Type) iter.Seq[*Type] { + return func(yield func(*Type) bool) { + // Explicitly type switch on the most common types to allow the inliner to + // do its work. This avoids allocating intermediate slices from walk() on + // the heap. + var tags []string + switch v := typ.(type) { + case *Void, *Int, *Enum, *Fwd, *Float, *declTag: + // No children to traverse. + // declTags is declared as a leaf type since it's parsed into .Tags fields of other types + // during unmarshaling. 
+ case *Pointer: + if !yield(&v.Target) { + return } - } - case *Union: - for i := range v.Members { - if !yield(&v.Members[i].Type) { - return false + case *Array: + if !yield(&v.Index) { + return } - } - case *Typedef: - if !yield(&v.Type) { - return false - } - case *Volatile: - if !yield(&v.Type) { - return false - } - case *Const: - if !yield(&v.Type) { - return false - } - case *Restrict: - if !yield(&v.Type) { - return false - } - case *Func: - if !yield(&v.Type) { - return false - } - case *FuncProto: - if !yield(&v.Return) { - return false - } - for i := range v.Params { - if !yield(&v.Params[i].Type) { - return false + if !yield(&v.Type) { + return } - } - case *Var: - if !yield(&v.Type) { - return false - } - case *Datasec: - for i := range v.Vars { - if !yield(&v.Vars[i].Type) { - return false + case *Struct: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } } + tags = v.Tags + case *Union: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + tags = v.Tags + case *Typedef: + if !yield(&v.Type) { + return + } + tags = v.Tags + case *Volatile: + if !yield(&v.Type) { + return + } + case *Const: + if !yield(&v.Type) { + return + } + case *Restrict: + if !yield(&v.Type) { + return + } + case *Func: + if !yield(&v.Type) { + return + } + if fp, ok := v.Type.(*FuncProto); ok { + for i := range fp.Params { + if len(v.ParamTags) <= i { + continue + } + for _, t := range v.ParamTags[i] { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + } + tags = v.Tags + case *FuncProto: + if !yield(&v.Return) { + return + } + for i := range v.Params { + if !yield(&v.Params[i].Type) { + return + } + } + case *Var: + if !yield(&v.Type) { + return + } + tags = v.Tags + case *Datasec: + for i := range v.Vars { + if !yield(&v.Vars[i].Type) { + return + } + } + case *TypeTag: + if !yield(&v.Type) { + return + } + case *cycle: + // cycle has children, but we ignore them deliberately. + default: + panic(fmt.Sprintf("don't know how to walk Type %T", v)) } - case *declTag: - if !yield(&v.Type) { - return false - } - case *typeTag: - if !yield(&v.Type) { - return false + + for _, t := range tags { + var tag Type = &declTag{typ, t, -1} + if !yield(&tag) { + return + } } - case *cycle: - // cycle has children, but we ignore them deliberately. 
- default: - panic(fmt.Sprintf("don't know how to walk Type %T", v)) } - - return true } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/types.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/types.go index a3397460b9d5..fc0a59744707 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/types.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/types.go @@ -1,12 +1,10 @@ package btf import ( - "encoding/binary" "errors" "fmt" "io" "math" - "slices" "strings" "github.com/cilium/ebpf/asm" @@ -67,7 +65,7 @@ var ( _ Type = (*Datasec)(nil) _ Type = (*Float)(nil) _ Type = (*declTag)(nil) - _ Type = (*typeTag)(nil) + _ Type = (*TypeTag)(nil) _ Type = (*cycle)(nil) ) @@ -169,6 +167,7 @@ type Struct struct { // The size of the struct including padding, in bytes Size uint32 Members []Member + Tags []string } func (s *Struct) Format(fs fmt.State, verb rune) { @@ -182,6 +181,7 @@ func (s *Struct) size() uint32 { return s.Size } func (s *Struct) copy() Type { cpy := *s cpy.Members = copyMembers(s.Members) + cpy.Tags = copyTags(cpy.Tags) return &cpy } @@ -195,6 +195,7 @@ type Union struct { // The size of the union including padding, in bytes. Size uint32 Members []Member + Tags []string } func (u *Union) Format(fs fmt.State, verb rune) { @@ -208,6 +209,7 @@ func (u *Union) size() uint32 { return u.Size } func (u *Union) copy() Type { cpy := *u cpy.Members = copyMembers(u.Members) + cpy.Tags = copyTags(cpy.Tags) return &cpy } @@ -218,6 +220,18 @@ func (u *Union) members() []Member { func copyMembers(orig []Member) []Member { cpy := make([]Member, len(orig)) copy(cpy, orig) + for i, member := range cpy { + cpy[i].Tags = copyTags(member.Tags) + } + return cpy +} + +func copyTags(orig []string) []string { + if orig == nil { // preserve nil vs zero-len slice distinction + return nil + } + cpy := make([]string, len(orig)) + copy(cpy, orig) return cpy } @@ -247,6 +261,7 @@ type Member struct { Type Type Offset Bits BitfieldSize Bits + Tags []string } // Enum lists possible values. @@ -334,6 +349,7 @@ func (f *Fwd) matches(typ Type) bool { type Typedef struct { Name string Type Type + Tags []string } func (td *Typedef) Format(fs fmt.State, verb rune) { @@ -344,6 +360,7 @@ func (td *Typedef) TypeName() string { return td.Name } func (td *Typedef) copy() Type { cpy := *td + cpy.Tags = copyTags(td.Tags) return &cpy } @@ -403,6 +420,12 @@ type Func struct { Name string Type Type Linkage FuncLinkage + Tags []string + // ParamTags holds a list of tags for each parameter of the FuncProto to which `Type` points. + // If no tags are present for any param, the outer slice will be nil/len(ParamTags)==0. + // If at least 1 param has a tag, the outer slice will have the same length as the number of params. + // The inner slice contains the tags and may be nil/len(ParamTags[i])==0 if no tags are present for that param. 
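	//
	// For example, a prototype with two parameters where only the first is
	// tagged (tag value illustrative) would carry:
	//
	//	ParamTags: [][]string{{"ctx"}, nil}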
+ ParamTags [][]string } func FuncMetadata(ins *asm.Instruction) *Func { @@ -424,6 +447,14 @@ func (f *Func) TypeName() string { return f.Name } func (f *Func) copy() Type { cpy := *f + cpy.Tags = copyTags(f.Tags) + if f.ParamTags != nil { // preserve nil vs zero-len slice distinction + ptCopy := make([][]string, len(f.ParamTags)) + for i, tags := range f.ParamTags { + ptCopy[i] = copyTags(tags) + } + cpy.ParamTags = ptCopy + } return &cpy } @@ -456,6 +487,7 @@ type Var struct { Name string Type Type Linkage VarLinkage + Tags []string } func (v *Var) Format(fs fmt.State, verb rune) { @@ -466,6 +498,7 @@ func (v *Var) TypeName() string { return v.Name } func (v *Var) copy() Type { cpy := *v + cpy.Tags = copyTags(v.Tags) return &cpy } @@ -540,19 +573,25 @@ func (dt *declTag) copy() Type { return &cpy } -// typeTag associates metadata with a type. -type typeTag struct { +// TypeTag associates metadata with a pointer type. Tag types act as a custom +// modifier(const, restrict, volatile) for the target type. Unlike declTags, +// TypeTags are ordered so the order in which they are added matters. +// +// One of their uses is to mark pointers as `__kptr` meaning a pointer points +// to kernel memory. Adding a `__kptr` to pointers in map values allows you +// to store pointers to kernel memory in maps. +type TypeTag struct { Type Type Value string } -func (tt *typeTag) Format(fs fmt.State, verb rune) { +func (tt *TypeTag) Format(fs fmt.State, verb rune) { formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value) } -func (tt *typeTag) TypeName() string { return "" } -func (tt *typeTag) qualify() Type { return tt.Type } -func (tt *typeTag) copy() Type { +func (tt *TypeTag) TypeName() string { return "" } +func (tt *TypeTag) qualify() Type { return tt.Type } +func (tt *TypeTag) copy() Type { cpy := *tt return &cpy } @@ -591,7 +630,7 @@ var ( _ qualifier = (*Const)(nil) _ qualifier = (*Restrict)(nil) _ qualifier = (*Volatile)(nil) - _ qualifier = (*typeTag)(nil) + _ qualifier = (*TypeTag)(nil) ) var errUnsizedType = errors.New("type is unsized") @@ -699,477 +738,15 @@ func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map copiedIDs[cpy] = id } - children(cpy, func(child *Type) bool { + for child := range children(cpy) { *child = copyType(*child, ids, copies, copiedIDs) - return true - }) + } return cpy } type typeDeque = internal.Deque[*Type] -// readAndInflateTypes reads the raw btf type info and turns it into a graph -// of Types connected via pointers. -// -// If base is provided, then the types are considered to be of a split BTF -// (e.g., a kernel module). -// -// Returns a slice of types indexed by TypeID. Since BTF ignores compilation -// units, multiple types may share the same name. A Type may form a cyclic graph -// by pointing at itself. -func readAndInflateTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32, rawStrings *stringTable, base *Spec) ([]Type, error) { - // because of the interleaving between types and struct members it is difficult to - // precompute the numbers of raw types this will parse - // this "guess" is a good first estimation - sizeOfbtfType := uintptr(btfTypeLen) - tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2 - types := make([]Type, 0, tyMaxCount) - - // Void is defined to always be type ID 0, and is thus omitted from BTF. 
- types = append(types, (*Void)(nil)) - - firstTypeID := TypeID(0) - if base != nil { - var err error - firstTypeID, err = base.nextTypeID() - if err != nil { - return nil, err - } - - // Split BTF doesn't contain Void. - types = types[:0] - } - - type fixupDef struct { - id TypeID - typ *Type - } - - var fixups []fixupDef - fixup := func(id TypeID, typ *Type) { - if id < firstTypeID { - if baseType, err := base.TypeByID(id); err == nil { - *typ = baseType - return - } - } - - idx := int(id - firstTypeID) - if idx < len(types) { - // We've already inflated this type, fix it up immediately. - *typ = types[idx] - return - } - - fixups = append(fixups, fixupDef{id, typ}) - } - - type bitfieldFixupDef struct { - id TypeID - m *Member - } - - var ( - legacyBitfields = make(map[TypeID][2]Bits) // offset, size - bitfieldFixups []bitfieldFixupDef - ) - convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { - // NB: The fixup below relies on pre-allocating this array to - // work, since otherwise append might re-allocate members. - members := make([]Member, 0, len(raw)) - for i, btfMember := range raw { - name, err := rawStrings.Lookup(btfMember.NameOff) - if err != nil { - return nil, fmt.Errorf("can't get name for member %d: %w", i, err) - } - - members = append(members, Member{ - Name: name, - Offset: Bits(btfMember.Offset), - }) - - m := &members[i] - fixup(raw[i].Type, &m.Type) - - if kindFlag { - m.BitfieldSize = Bits(btfMember.Offset >> 24) - m.Offset &= 0xffffff - // We ignore legacy bitfield definitions if the current composite - // is a new-style bitfield. This is kind of safe since offset and - // size on the type of the member must be zero if kindFlat is set - // according to spec. - continue - } - - // This may be a legacy bitfield, try to fix it up. - data, ok := legacyBitfields[raw[i].Type] - if ok { - // Bingo! - m.Offset += data[0] - m.BitfieldSize = data[1] - continue - } - - if m.Type != nil { - // We couldn't find a legacy bitfield, but we know that the member's - // type has already been inflated. Hence we know that it can't be - // a legacy bitfield and there is nothing left to do. - continue - } - - // We don't have fixup data, and the type we're pointing - // at hasn't been inflated yet. No choice but to defer - // the fixup. 
- bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{ - raw[i].Type, - m, - }) - } - return members, nil - } - - var ( - buf = make([]byte, 1024) - header btfType - bInt btfInt - bArr btfArray - bMembers []btfMember - bEnums []btfEnum - bParams []btfParam - bVariable btfVariable - bSecInfos []btfVarSecinfo - bDeclTag btfDeclTag - bEnums64 []btfEnum64 - ) - - var declTags []*declTag - for { - var ( - id = firstTypeID + TypeID(len(types)) - typ Type - ) - - if _, err := io.ReadFull(r, buf[:btfTypeLen]); err == io.EOF { - break - } else if err != nil { - return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) - } - - if _, err := unmarshalBtfType(&header, buf[:btfTypeLen], bo); err != nil { - return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) - } - - if id < firstTypeID { - return nil, fmt.Errorf("no more type IDs") - } - - name, err := rawStrings.Lookup(header.NameOff) - if err != nil { - return nil, fmt.Errorf("get name for type id %d: %w", id, err) - } - - switch header.Kind() { - case kindInt: - size := header.Size() - buf = buf[:btfIntLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfInt, id: %d: %w", id, err) - } - if _, err := unmarshalBtfInt(&bInt, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) - } - if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { - legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} - } - typ = &Int{name, header.Size(), bInt.Encoding()} - - case kindPointer: - ptr := &Pointer{nil} - fixup(header.Type(), &ptr.Target) - typ = ptr - - case kindArray: - buf = buf[:btfArrayLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfArray, id: %d: %w", id, err) - } - if _, err := unmarshalBtfArray(&bArr, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) - } - - arr := &Array{nil, nil, bArr.Nelems} - fixup(bArr.IndexType, &arr.Index) - fixup(bArr.Type, &arr.Type) - typ = arr - - case kindStruct: - vlen := header.Vlen() - bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] - buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) - } - if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) - } - - members, err := convertMembers(bMembers, header.Bitfield()) - if err != nil { - return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) - } - typ = &Struct{name, header.Size(), members} - - case kindUnion: - vlen := header.Vlen() - bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] - buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) - } - if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) - } - - members, err := convertMembers(bMembers, header.Bitfield()) - if err != nil { - return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) - } - typ = &Union{name, header.Size(), members} - - case kindEnum: - vlen := header.Vlen() - bEnums = slices.Grow(bEnums[:0], vlen)[:vlen] - buf = slices.Grow(buf[:0], vlen*btfEnumLen)[:vlen*btfEnumLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't 
read btfEnums, id: %d: %w", id, err) - } - if _, err := unmarshalBtfEnums(bEnums, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfEnums, id: %d: %w", id, err) - } - - vals := make([]EnumValue, 0, vlen) - signed := header.Signed() - for i, btfVal := range bEnums { - name, err := rawStrings.Lookup(btfVal.NameOff) - if err != nil { - return nil, fmt.Errorf("get name for enum value %d: %s", i, err) - } - value := uint64(btfVal.Val) - if signed { - // Sign extend values to 64 bit. - value = uint64(int32(btfVal.Val)) - } - vals = append(vals, EnumValue{name, value}) - } - typ = &Enum{name, header.Size(), signed, vals} - - case kindForward: - typ = &Fwd{name, header.FwdKind()} - - case kindTypedef: - typedef := &Typedef{name, nil} - fixup(header.Type(), &typedef.Type) - typ = typedef - - case kindVolatile: - volatile := &Volatile{nil} - fixup(header.Type(), &volatile.Type) - typ = volatile - - case kindConst: - cnst := &Const{nil} - fixup(header.Type(), &cnst.Type) - typ = cnst - - case kindRestrict: - restrict := &Restrict{nil} - fixup(header.Type(), &restrict.Type) - typ = restrict - - case kindFunc: - fn := &Func{name, nil, header.Linkage()} - fixup(header.Type(), &fn.Type) - typ = fn - - case kindFuncProto: - vlen := header.Vlen() - bParams = slices.Grow(bParams[:0], vlen)[:vlen] - buf = slices.Grow(buf[:0], vlen*btfParamLen)[:vlen*btfParamLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfParams, id: %d: %w", id, err) - } - if _, err := unmarshalBtfParams(bParams, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfParams, id: %d: %w", id, err) - } - - params := make([]FuncParam, 0, vlen) - for i, param := range bParams { - name, err := rawStrings.Lookup(param.NameOff) - if err != nil { - return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) - } - params = append(params, FuncParam{ - Name: name, - }) - } - for i := range params { - fixup(bParams[i].Type, ¶ms[i].Type) - } - - fp := &FuncProto{nil, params} - fixup(header.Type(), &fp.Return) - typ = fp - - case kindVar: - buf = buf[:btfVariableLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) - } - if _, err := unmarshalBtfVariable(&bVariable, buf, bo); err != nil { - return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) - } - - v := &Var{name, nil, VarLinkage(bVariable.Linkage)} - fixup(header.Type(), &v.Type) - typ = v - - case kindDatasec: - vlen := header.Vlen() - bSecInfos = slices.Grow(bSecInfos[:0], vlen)[:vlen] - buf = slices.Grow(buf[:0], vlen*btfVarSecinfoLen)[:vlen*btfVarSecinfoLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfVarSecInfos, id: %d: %w", id, err) - } - if _, err := unmarshalBtfVarSecInfos(bSecInfos, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfVarSecInfos, id: %d: %w", id, err) - } - - vars := make([]VarSecinfo, 0, vlen) - for _, btfVar := range bSecInfos { - vars = append(vars, VarSecinfo{ - Offset: btfVar.Offset, - Size: btfVar.Size, - }) - } - for i := range vars { - fixup(bSecInfos[i].Type, &vars[i].Type) - } - typ = &Datasec{name, header.Size(), vars} - - case kindFloat: - typ = &Float{name, header.Size()} - - case kindDeclTag: - buf = buf[:btfDeclTagLen] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) - } - if _, err := unmarshalBtfDeclTag(&bDeclTag, buf, bo); err != nil { - return nil, 
fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) - } - - btfIndex := bDeclTag.ComponentIdx - if uint64(btfIndex) > math.MaxInt { - return nil, fmt.Errorf("type id %d: index exceeds int", id) - } - - dt := &declTag{nil, name, int(int32(btfIndex))} - fixup(header.Type(), &dt.Type) - typ = dt - - declTags = append(declTags, dt) - - case kindTypeTag: - tt := &typeTag{nil, name} - fixup(header.Type(), &tt.Type) - typ = tt - - case kindEnum64: - vlen := header.Vlen() - bEnums64 = slices.Grow(bEnums64[:0], vlen)[:vlen] - buf = slices.Grow(buf[:0], vlen*btfEnum64Len)[:vlen*btfEnum64Len] - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("can't read btfEnum64s, id: %d: %w", id, err) - } - if _, err := unmarshalBtfEnums64(bEnums64, buf, bo); err != nil { - return nil, fmt.Errorf("can't unmarshal btfEnum64s, id: %d: %w", id, err) - } - - vals := make([]EnumValue, 0, vlen) - for i, btfVal := range bEnums64 { - name, err := rawStrings.Lookup(btfVal.NameOff) - if err != nil { - return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) - } - value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32) - vals = append(vals, EnumValue{name, value}) - } - typ = &Enum{name, header.Size(), header.Signed(), vals} - - default: - return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) - } - - types = append(types, typ) - } - - for _, fixup := range fixups { - if fixup.id < firstTypeID { - return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id) - } - - idx := int(fixup.id - firstTypeID) - if idx >= len(types) { - return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) - } - - *fixup.typ = types[idx] - } - - for _, bitfieldFixup := range bitfieldFixups { - if bitfieldFixup.id < firstTypeID { - return nil, fmt.Errorf("bitfield fixup from split to base types is not expected") - } - - data, ok := legacyBitfields[bitfieldFixup.id] - if ok { - // This is indeed a legacy bitfield, fix it up. - bitfieldFixup.m.Offset += data[0] - bitfieldFixup.m.BitfieldSize = data[1] - } - } - - for _, dt := range declTags { - switch t := dt.Type.(type) { - case *Var, *Typedef: - if dt.Index != -1 { - return nil, fmt.Errorf("type %s: index %d is not -1", dt, dt.Index) - } - - case composite: - if dt.Index >= len(t.members()) { - return nil, fmt.Errorf("type %s: index %d exceeds members of %s", dt, dt.Index, t) - } - - case *Func: - fp, ok := t.Type.(*FuncProto) - if !ok { - return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type) - } - - if dt.Index >= len(fp.Params) { - return nil, fmt.Errorf("type %s: index %d exceeds params of %s", dt, dt.Index, t) - } - - default: - return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t) - } - } - - return types, nil -} - // essentialName represents the name of a BTF type stripped of any flavor // suffixes after a ___ delimiter. type essentialName string @@ -1207,6 +784,20 @@ func UnderlyingType(typ Type) Type { return &cycle{typ} } +// QualifiedType returns the type with all qualifiers removed. +func QualifiedType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} + // As returns typ if is of type T. Otherwise it peels qualifiers and Typedefs // until it finds a T. 
// diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/unmarshal.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/unmarshal.go new file mode 100644 index 000000000000..fdcf583da660 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/unmarshal.go @@ -0,0 +1,780 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/maphash" + "io" + "iter" + "maps" + "math" + "slices" + "sync" +) + +type decoder struct { + // Immutable fields, may be shared. + + base *decoder + byteOrder binary.ByteOrder + raw []byte + strings *stringTable + // The ID for offsets[0]. + firstTypeID TypeID + // Map from TypeID to offset of the marshaled data in raw. Contains an entry + // for each TypeID, including 0 aka Void. The offset for Void is invalid. + offsets []int + declTags map[TypeID][]TypeID + // An index from essentialName to TypeID. + namedTypes *fuzzyStringIndex + + // Protection for mutable fields below. + mu sync.Mutex + types map[TypeID]Type + typeIDs map[Type]TypeID + legacyBitfields map[TypeID][2]Bits // offset, size +} + +func newDecoder(raw []byte, bo binary.ByteOrder, strings *stringTable, base *decoder) (*decoder, error) { + firstTypeID := TypeID(0) + if base != nil { + if base.byteOrder != bo { + return nil, fmt.Errorf("can't use %v base with %v split BTF", base.byteOrder, bo) + } + + if base.firstTypeID != 0 { + return nil, fmt.Errorf("can't use split BTF as base") + } + + firstTypeID = TypeID(len(base.offsets)) + } + + var header btfType + var numTypes, numDeclTags, numNamedTypes int + + for _, err := range allBtfTypeOffsets(raw, bo, &header) { + if err != nil { + return nil, err + } + + numTypes++ + + if header.Kind() == kindDeclTag { + numDeclTags++ + } + + if header.NameOff != 0 { + numNamedTypes++ + } + } + + if firstTypeID == 0 { + // Allocate an extra slot for Void so we don't have to deal with + // constant off by one issues. + numTypes++ + } + + offsets := make([]int, 0, numTypes) + declTags := make(map[TypeID][]TypeID, numDeclTags) + namedTypes := newFuzzyStringIndex(numNamedTypes) + + if firstTypeID == 0 { + // Add a sentinel for Void. + offsets = append(offsets, math.MaxInt) + } + + id := firstTypeID + TypeID(len(offsets)) + for offset := range allBtfTypeOffsets(raw, bo, &header) { + if id < firstTypeID { + return nil, fmt.Errorf("no more type IDs") + } + + offsets = append(offsets, offset) + + if header.Kind() == kindDeclTag { + declTags[header.Type()] = append(declTags[header.Type()], id) + } + + // Build named type index. + name, err := strings.LookupBytes(header.NameOff) + if err != nil { + return nil, fmt.Errorf("lookup type name for id %v: %w", id, err) + } + + if len(name) > 0 { + if i := bytes.Index(name, []byte("___")); i != -1 { + // Flavours are rare. It's cheaper to find the first index for some + // reason. 
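			// A flavour is a "___"-suffixed variant of a type name (a libbpf/CO-RE
			// convention); e.g. "task_struct___v2" is indexed under its essential
			// name "task_struct".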
+ i = bytes.LastIndex(name, []byte("___")) + name = name[:i] + } + + namedTypes.Add(name, id) + } + + id++ + } + + namedTypes.Build() + + return &decoder{ + base, + bo, + raw, + strings, + firstTypeID, + offsets, + declTags, + namedTypes, + sync.Mutex{}, + make(map[TypeID]Type), + make(map[Type]TypeID), + make(map[TypeID][2]Bits), + }, nil +} + +func allBtfTypeOffsets(buf []byte, bo binary.ByteOrder, header *btfType) iter.Seq2[int, error] { + return func(yield func(int, error) bool) { + for offset := 0; offset < len(buf); { + start := offset + + n, err := unmarshalBtfType(header, buf[offset:], bo) + if err != nil { + yield(-1, fmt.Errorf("unmarshal type header: %w", err)) + return + } + offset += n + + n, err = header.DataLen() + if err != nil { + yield(-1, err) + return + } + offset += n + + if offset > len(buf) { + yield(-1, fmt.Errorf("auxiliary type data: %w", io.ErrUnexpectedEOF)) + return + } + + if !yield(start, nil) { + return + } + } + } +} + +func rebaseDecoder(d *decoder, base *decoder) (*decoder, error) { + if d.base == nil { + return nil, fmt.Errorf("rebase split spec: not a split spec") + } + + if len(d.base.raw) != len(base.raw) || (len(d.base.raw) > 0 && &d.base.raw[0] != &base.raw[0]) { + return nil, fmt.Errorf("rebase split spec: raw BTF differs") + } + + return &decoder{ + base, + d.byteOrder, + d.raw, + d.strings, + d.firstTypeID, + d.offsets, + d.declTags, + d.namedTypes, + sync.Mutex{}, + make(map[TypeID]Type), + make(map[Type]TypeID), + make(map[TypeID][2]Bits), + }, nil +} + +// Copy performs a deep copy of a decoder and its base. +func (d *decoder) Copy() *decoder { + if d == nil { + return nil + } + + return d.copy(nil) +} + +func (d *decoder) copy(copiedTypes map[Type]Type) *decoder { + if d == nil { + return nil + } + + d.mu.Lock() + defer d.mu.Unlock() + + if copiedTypes == nil { + copiedTypes = make(map[Type]Type, len(d.types)) + } + + types := make(map[TypeID]Type, len(d.types)) + typeIDs := make(map[Type]TypeID, len(d.typeIDs)) + for id, typ := range d.types { + types[id] = copyType(typ, d.typeIDs, copiedTypes, typeIDs) + } + + return &decoder{ + d.base.copy(copiedTypes), + d.byteOrder, + d.raw, + d.strings, + d.firstTypeID, + d.offsets, + d.declTags, + d.namedTypes, + sync.Mutex{}, + types, + typeIDs, + maps.Clone(d.legacyBitfields), + } +} + +// TypeID returns the ID for a Type previously obtained via [TypeByID]. +func (d *decoder) TypeID(typ Type) (TypeID, error) { + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + d.mu.Lock() + defer d.mu.Unlock() + + id, ok := d.typeIDs[typ] + if !ok { + return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) + } + + return id, nil +} + +// TypesByName returns all types which have the given essential name. +// +// Returns ErrNotFound if no matching Type exists. +func (d *decoder) TypesByName(name essentialName) ([]Type, error) { + var types []Type + for id := range d.namedTypes.Find(string(name)) { + typ, err := d.TypeByID(id) + if err != nil { + return nil, err + } + + if newEssentialName(typ.TypeName()) == name { + // Deal with hash collisions by checking against the name. + types = append(types, typ) + } + } + + if len(types) == 0 { + // Return an unwrapped error because this is on the hot path + // for CO-RE. + return nil, ErrNotFound + } + + return types, nil +} + +// TypeByID decodes a type and any of its descendants. 
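//
// The lazy decoder backs the exported lookups on Spec, so callers keep using
// the public API; a sketch (error handling elided):
//
//	spec, _ := btf.LoadKernelSpec()
//	task, _ := spec.AnyTypeByName("task_struct") // inflated on demand
//	first, _ := spec.TypeByID(1)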
+func (d *decoder) TypeByID(id TypeID) (Type, error) { + d.mu.Lock() + defer d.mu.Unlock() + + return d.inflateType(id) +} + +func (d *decoder) inflateType(id TypeID) (typ Type, err error) { + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + + // err is the return value of the enclosing function, even if an explicit + // return is used. + // See https://go.dev/ref/spec#Defer_statements + if err != nil { + // Remove partially inflated type so that d.types only contains + // fully inflated ones. + delete(d.types, id) + } else { + // Populate reverse index. + d.typeIDs[typ] = id + } + }() + + if id < d.firstTypeID { + return d.base.inflateType(id) + } + + if id == 0 { + // Void is defined to always be type ID 0, and is thus omitted from BTF. + // Fast-path because it is looked up frequently. + return (*Void)(nil), nil + } + + if typ, ok := d.types[id]; ok { + return typ, nil + } + + fixup := func(id TypeID, typ *Type) { + fixup, err := d.inflateType(id) + if err != nil { + panic(err) + } + *typ = fixup + } + + convertMembers := func(header *btfType, buf []byte) ([]Member, error) { + var bm btfMember + members := make([]Member, 0, header.Vlen()) + for i := range header.Vlen() { + n, err := unmarshalBtfMember(&bm, buf, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("unmarshal member: %w", err) + } + buf = buf[n:] + + name, err := d.strings.Lookup(bm.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + + members = append(members, Member{ + Name: name, + Offset: Bits(bm.Offset), + }) + + m := &members[i] + fixup(bm.Type, &m.Type) + + if header.Bitfield() { + m.BitfieldSize = Bits(bm.Offset >> 24) + m.Offset &= 0xffffff + // We ignore legacy bitfield definitions if the current composite + // is a new-style bitfield. This is kind of safe since offset and + // size on the type of the member must be zero if kindFlat is set + // according to spec. + continue + } + + // This may be a legacy bitfield, try to fix it up. + data, ok := d.legacyBitfields[bm.Type] + if ok { + // Bingo! 
+ m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + } + return members, nil + } + + idx := int(id - d.firstTypeID) + if idx >= len(d.offsets) { + return nil, fmt.Errorf("type id %v: %w", id, ErrNotFound) + } + + offset := d.offsets[idx] + if offset >= len(d.raw) { + return nil, fmt.Errorf("offset out of bounds") + } + + var ( + header btfType + bInt btfInt + bArr btfArray + bVariable btfVariable + bDeclTag btfDeclTag + pos = d.raw[offset:] + ) + + { + if n, err := unmarshalBtfType(&header, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) + } else { + pos = pos[n:] + } + + name, err := d.strings.Lookup(header.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch header.Kind() { + case kindInt: + size := header.Size() + if _, err := unmarshalBtfInt(&bInt, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) + } + if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { + d.legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} + } + typ = &Int{name, header.Size(), bInt.Encoding()} + d.types[id] = typ + + case kindPointer: + ptr := &Pointer{nil} + d.types[id] = ptr + + fixup(header.Type(), &ptr.Target) + typ = ptr + + case kindArray: + if _, err := unmarshalBtfArray(&bArr, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) + } + + arr := &Array{nil, nil, bArr.Nelems} + d.types[id] = arr + + fixup(bArr.IndexType, &arr.Index) + fixup(bArr.Type, &arr.Type) + typ = arr + + case kindStruct: + str := &Struct{name, header.Size(), nil, nil} + d.types[id] = str + typ = str + + str.Members, err = convertMembers(&header, pos) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + + case kindUnion: + uni := &Union{name, header.Size(), nil, nil} + d.types[id] = uni + typ = uni + + uni.Members, err = convertMembers(&header, pos) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + + case kindEnum: + enum := &Enum{name, header.Size(), header.Signed(), nil} + d.types[id] = enum + typ = enum + + var be btfEnum + enum.Values = make([]EnumValue, 0, header.Vlen()) + for i := range header.Vlen() { + n, err := unmarshalBtfEnum(&be, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("unmarshal btfEnum %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(be.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + + value := uint64(be.Val) + if enum.Signed { + // Sign extend values to 64 bit. 
+ value = uint64(int32(be.Val)) + } + enum.Values = append(enum.Values, EnumValue{name, value}) + } + + case kindForward: + typ = &Fwd{name, header.FwdKind()} + d.types[id] = typ + + case kindTypedef: + typedef := &Typedef{name, nil, nil} + d.types[id] = typedef + + fixup(header.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + d.types[id] = volatile + + fixup(header.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + d.types[id] = cnst + + fixup(header.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + d.types[id] = restrict + + fixup(header.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, header.Linkage(), nil, nil} + d.types[id] = fn + + fixup(header.Type(), &fn.Type) + typ = fn + + case kindFuncProto: + fp := &FuncProto{} + d.types[id] = fp + + params := make([]FuncParam, 0, header.Vlen()) + var bParam btfParam + for i := range header.Vlen() { + n, err := unmarshalBtfParam(&bParam, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfParam %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(bParam.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + + param := FuncParam{Name: name} + fixup(bParam.Type, ¶m.Type) + params = append(params, param) + } + + fixup(header.Type(), &fp.Return) + fp.Params = params + typ = fp + + case kindVar: + if _, err := unmarshalBtfVariable(&bVariable, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + + v := &Var{name, nil, VarLinkage(bVariable.Linkage), nil} + d.types[id] = v + + fixup(header.Type(), &v.Type) + typ = v + + case kindDatasec: + ds := &Datasec{name, header.Size(), nil} + d.types[id] = ds + + vlen := header.Vlen() + vars := make([]VarSecinfo, 0, vlen) + var bSecInfo btfVarSecinfo + for i := 0; i < vlen; i++ { + n, err := unmarshalBtfVarSecInfo(&bSecInfo, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfVarSecinfo %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + vs := VarSecinfo{ + Offset: bSecInfo.Offset, + Size: bSecInfo.Size, + } + fixup(bSecInfo.Type, &vs.Type) + vars = append(vars, vs) + } + ds.Vars = vars + typ = ds + + case kindFloat: + typ = &Float{name, header.Size()} + d.types[id] = typ + + case kindDeclTag: + if _, err := unmarshalBtfDeclTag(&bDeclTag, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + + btfIndex := bDeclTag.ComponentIdx + if uint64(btfIndex) > math.MaxInt { + return nil, fmt.Errorf("type id %d: index exceeds int", id) + } + + dt := &declTag{nil, name, int(int32(btfIndex))} + d.types[id] = dt + + fixup(header.Type(), &dt.Type) + typ = dt + + case kindTypeTag: + tt := &TypeTag{nil, name} + d.types[id] = tt + + fixup(header.Type(), &tt.Type) + typ = tt + + case kindEnum64: + enum := &Enum{name, header.Size(), header.Signed(), nil} + d.types[id] = enum + typ = enum + + enum.Values = make([]EnumValue, 0, header.Vlen()) + var bEnum64 btfEnum64 + for i := range header.Vlen() { + n, err := unmarshalBtfEnum64(&bEnum64, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnum64 %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(bEnum64.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) + } + value := 
(uint64(bEnum64.ValHi32) << 32) | uint64(bEnum64.ValLo32) + enum.Values = append(enum.Values, EnumValue{name, value}) + } + + default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) + } + } + + for _, tagID := range d.declTags[id] { + dtType, err := d.inflateType(tagID) + if err != nil { + return nil, err + } + + dt, ok := dtType.(*declTag) + if !ok { + return nil, fmt.Errorf("type id %v: not a declTag", tagID) + } + + switch t := typ.(type) { + case *Var: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case *Typedef: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case composite: + if dt.Index >= 0 { + members := t.members() + if dt.Index >= len(members) { + return nil, fmt.Errorf("type %s: component idx %d exceeds members of %s", dt, dt.Index, t) + } + + members[dt.Index].Tags = append(members[dt.Index].Tags, dt.Value) + } else if dt.Index == -1 { + switch t2 := t.(type) { + case *Struct: + t2.Tags = append(t2.Tags, dt.Value) + case *Union: + t2.Tags = append(t2.Tags, dt.Value) + } + } else { + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + } + + case *Func: + fp, ok := t.Type.(*FuncProto) + if !ok { + return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type) + } + + // Ensure the number of argument tag lists equals the number of arguments + if len(t.ParamTags) == 0 { + t.ParamTags = make([][]string, len(fp.Params)) + } + + if dt.Index >= 0 { + if dt.Index >= len(fp.Params) { + return nil, fmt.Errorf("type %s: component idx %d exceeds params of %s", dt, dt.Index, t) + } + + t.ParamTags[dt.Index] = append(t.ParamTags[dt.Index], dt.Value) + } else if dt.Index == -1 { + t.Tags = append(t.Tags, dt.Value) + } else { + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + } + + default: + return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t) + } + } + + return typ, nil +} + +// An index from string to TypeID. +// +// Fuzzy because it may return false positive matches. +type fuzzyStringIndex struct { + seed maphash.Seed + entries []fuzzyStringIndexEntry +} + +func newFuzzyStringIndex(capacity int) *fuzzyStringIndex { + return &fuzzyStringIndex{ + maphash.MakeSeed(), + make([]fuzzyStringIndexEntry, 0, capacity), + } +} + +// Add a string to the index. +// +// Calling the method with identical arguments will create duplicate entries. +func (idx *fuzzyStringIndex) Add(name []byte, id TypeID) { + hash := uint32(maphash.Bytes(idx.seed, name)) + idx.entries = append(idx.entries, newFuzzyStringIndexEntry(hash, id)) +} + +// Build the index. +// +// Must be called after [Add] and before [Match]. +func (idx *fuzzyStringIndex) Build() { + slices.Sort(idx.entries) +} + +// Find TypeIDs which may match the name. +// +// May return false positives, but is guaranteed to not have false negatives. +// +// You must call [Build] at least once before calling this method. +func (idx *fuzzyStringIndex) Find(name string) iter.Seq[TypeID] { + return func(yield func(TypeID) bool) { + hash := uint32(maphash.String(idx.seed, name)) + + // We match only on the first 32 bits here, so ignore found. 
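Before the fuzzy-index lookup continues below, a small standalone sketch of the bit-level conversions used by the member and enum decoding above; the helper names are invented for illustration:

```go
// Standalone illustrations of the decoding steps above.
package main

import "fmt"

// New-style (kind_flag) members pack the bitfield size into the top 8 bits
// of the member offset and the bit offset into the lower 24 bits.
func splitBitfieldOffset(raw uint32) (size, offset uint32) {
	return raw >> 24, raw & 0xffffff
}

// Values of signed 32-bit enums are sign-extended to 64 bits.
func enumValue(raw uint32, signed bool) uint64 {
	if signed {
		return uint64(int32(raw))
	}
	return uint64(raw)
}

// Enum64 values are assembled from separate high and low 32-bit halves.
func enum64Value(hi, lo uint32) uint64 {
	return uint64(hi)<<32 | uint64(lo)
}

func main() {
	size, off := splitBitfieldOffset(3<<24 | 68)
	fmt.Println(size, off)                           // 3 68
	fmt.Printf("%#x\n", enumValue(0xffffffff, true)) // 0xffffffffffffffff
	fmt.Printf("%#x\n", enum64Value(0x1, 0x2))       // 0x100000002
}
```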
+ i, _ := slices.BinarySearch(idx.entries, fuzzyStringIndexEntry(hash)<<32) + for i := i; i < len(idx.entries); i++ { + if idx.entries[i].hash() != hash { + break + } + + if !yield(idx.entries[i].id()) { + return + } + } + } +} + +// Tuple mapping the hash of an essential name to a type. +// +// Encoded in an uint64 so that it implements cmp.Ordered. +type fuzzyStringIndexEntry uint64 + +func newFuzzyStringIndexEntry(hash uint32, id TypeID) fuzzyStringIndexEntry { + return fuzzyStringIndexEntry(hash)<<32 | fuzzyStringIndexEntry(id) +} + +func (e fuzzyStringIndexEntry) hash() uint32 { + return uint32(e >> 32) +} + +func (e fuzzyStringIndexEntry) id() TypeID { + return TypeID(e) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/btf/workarounds.go b/src/runtime/vendor/github.com/cilium/ebpf/btf/workarounds.go index 12a89b87eedb..eb09047fb30f 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/btf/workarounds.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/btf/workarounds.go @@ -12,7 +12,7 @@ func datasecResolveWorkaround(b *Builder, ds *Datasec) error { } switch v.Type.(type) { - case *Typedef, *Volatile, *Const, *Restrict, *typeTag: + case *Typedef, *Volatile, *Const, *Restrict, *TypeTag: // NB: We must never call Add on a Datasec, otherwise we risk // infinite recursion. _, err := b.Add(v.Type) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/collection.go b/src/runtime/vendor/github.com/cilium/ebpf/collection.go index b2cb214adce0..77476bd67391 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/collection.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/collection.go @@ -4,14 +4,18 @@ import ( "encoding/binary" "errors" "fmt" + "path/filepath" "reflect" "strings" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" "github.com/cilium/ebpf/internal/kconfig" - "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" ) // CollectionOptions control loading a collection into the kernel. @@ -38,6 +42,11 @@ type CollectionSpec struct { Maps map[string]*MapSpec Programs map[string]*ProgramSpec + // Variables refer to global variables declared in the ELF. They can be read + // and modified freely before loading the Collection. Modifying them after + // loading has no effect on a running eBPF program. + Variables map[string]*VariableSpec + // Types holds type information about Maps and Programs. // Modifications to Types are currently undefined behaviour. 
Types *btf.Spec @@ -54,23 +63,36 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { } cpy := CollectionSpec{ - Maps: make(map[string]*MapSpec, len(cs.Maps)), - Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + Maps: copyMapOfSpecs(cs.Maps), + Programs: copyMapOfSpecs(cs.Programs), + Variables: make(map[string]*VariableSpec, len(cs.Variables)), ByteOrder: cs.ByteOrder, Types: cs.Types.Copy(), } - for name, spec := range cs.Maps { - cpy.Maps[name] = spec.Copy() + for name, spec := range cs.Variables { + cpy.Variables[name] = spec.copy(&cpy) } - - for name, spec := range cs.Programs { - cpy.Programs[name] = spec.Copy() + if cs.Variables == nil { + cpy.Variables = nil } return &cpy } +func copyMapOfSpecs[T interface{ Copy() T }](m map[string]T) map[string]T { + if m == nil { + return nil + } + + cpy := make(map[string]T, len(m)) + for k, v := range m { + cpy[k] = v.Copy() + } + + return cpy +} + // RewriteMaps replaces all references to specific maps. // // Use this function to use pre-existing maps instead of creating new ones @@ -134,65 +156,24 @@ func (m *MissingConstantsError) Error() string { // From Linux 5.5 the verifier will use constants to eliminate dead code. // // Returns an error wrapping [MissingConstantsError] if a constant doesn't exist. +// +// Deprecated: Use [CollectionSpec.Variables] to interact with constants instead. +// RewriteConstants is now a wrapper around the VariableSpec API. func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { - replaced := make(map[string]bool) - - for name, spec := range cs.Maps { - if !strings.HasPrefix(name, ".rodata") { - continue - } - - b, ds, err := spec.dataSection() - if errors.Is(err, errMapNoBTFValue) { - // Data sections without a BTF Datasec are valid, but don't support - // constant replacements. + var missing []string + for n, c := range consts { + v, ok := cs.Variables[n] + if !ok { + missing = append(missing, n) continue } - if err != nil { - return fmt.Errorf("map %s: %w", name, err) - } - - // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice - // to avoid any changes affecting other copies of the MapSpec. - cpy := make([]byte, len(b)) - copy(cpy, b) - - for _, v := range ds.Vars { - vname := v.Type.TypeName() - replacement, ok := consts[vname] - if !ok { - continue - } - - if _, ok := v.Type.(*btf.Var); !ok { - return fmt.Errorf("section %s: unexpected type %T for variable %s", name, v.Type, vname) - } - - if replaced[vname] { - return fmt.Errorf("section %s: duplicate variable %s", name, vname) - } - - if int(v.Offset+v.Size) > len(cpy) { - return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname) - } - - b, err := sysenc.Marshal(replacement, int(v.Size)) - if err != nil { - return fmt.Errorf("marshaling constant replacement %s: %w", vname, err) - } - - b.CopyTo(cpy[v.Offset : v.Offset+v.Size]) - replaced[vname] = true + if !v.Constant() { + return fmt.Errorf("variable %s is not a constant", n) } - spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy} - } - - var missing []string - for c := range consts { - if !replaced[c] { - missing = append(missing, c) + if err := v.Set(c); err != nil { + return fmt.Errorf("rewriting constant %s: %w", n, err) } } @@ -210,25 +191,23 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error // if this sounds useful. // // 'to' must be a pointer to a struct. 
A field of the -// struct is updated with values from Programs or Maps if it -// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. +// struct is updated with values from Programs, Maps or Variables if it +// has an `ebpf` tag and its type is *ProgramSpec, *MapSpec or *VariableSpec. // The tag's value specifies the name of the program or map as // found in the CollectionSpec. // // struct { -// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` -// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Var *ebpf.VariableSpec `ebpf:"some_var"` // Ignored int // } // // Returns an error if any of the eBPF objects can't be found, or -// if the same MapSpec or ProgramSpec is assigned multiple times. +// if the same Spec is assigned multiple times. func (cs *CollectionSpec) Assign(to interface{}) error { - // Assign() only supports assigning ProgramSpecs and MapSpecs, - // so doesn't load any resources into the kernel. getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { - case reflect.TypeOf((*ProgramSpec)(nil)): if p := cs.Programs[name]; p != nil { return p, nil @@ -241,6 +220,12 @@ func (cs *CollectionSpec) Assign(to interface{}) error { } return nil, fmt.Errorf("missing map %q", name) + case reflect.TypeOf((*VariableSpec)(nil)): + if v := cs.Variables[name]; v != nil { + return v, nil + } + return nil, fmt.Errorf("missing variable %q", name) + default: return nil, fmt.Errorf("unsupported type %s", typ) } @@ -286,6 +271,7 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) // Support assigning Programs and Maps, lazy-loading the required objects. assignedMaps := make(map[string]bool) assignedProgs := make(map[string]bool) + assignedVars := make(map[string]bool) getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { @@ -298,6 +284,10 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) assignedMaps[name] = true return loader.loadMap(name) + case reflect.TypeOf((*Variable)(nil)): + assignedVars[name] = true + return loader.loadVariable(name) + default: return nil, fmt.Errorf("unsupported type %s", typ) } @@ -315,8 +305,7 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) // Evaluate the loader's objects after all (lazy)loading has taken place. for n, m := range loader.maps { - switch m.typ { - case ProgramArray: + if m.typ.canStoreProgram() { // Require all lazy-loaded ProgramArrays to be assigned to the given object. // The kernel empties a ProgramArray once the last user space reference // to it closes, which leads to failed tail calls. Combined with the library @@ -338,15 +327,22 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) for p := range assignedProgs { delete(loader.programs, p) } + for p := range assignedVars { + delete(loader.vars, p) + } return nil } -// Collection is a collection of Programs and Maps associated -// with their symbols +// Collection is a collection of live BPF resources present in the kernel. type Collection struct { Programs map[string]*Program Maps map[string]*Map + + // Variables contains global variables used by the Collection's program(s). On + // kernels older than 5.5, most interactions with Variables return + // [ErrNotSupported]. 
+ Variables map[string]*Variable } // NewCollection creates a Collection from the given spec, creating and @@ -387,19 +383,26 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co } } + for varName := range spec.Variables { + if _, err := loader.loadVariable(varName); err != nil { + return nil, err + } + } + // Maps can contain Program and Map stubs, so populate them after // all Maps and Programs have been successfully loaded. if err := loader.populateDeferredMaps(); err != nil { return nil, err } - // Prevent loader.cleanup from closing maps and programs. - maps, progs := loader.maps, loader.programs - loader.maps, loader.programs = nil, nil + // Prevent loader.cleanup from closing maps, programs and vars. + maps, progs, vars := loader.maps, loader.programs, loader.vars + loader.maps, loader.programs, loader.vars = nil, nil, nil return &Collection{ progs, maps, + vars, }, nil } @@ -408,6 +411,8 @@ type collectionLoader struct { opts *CollectionOptions maps map[string]*Map programs map[string]*Program + vars map[string]*Variable + types *btf.Cache } func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { @@ -416,15 +421,14 @@ func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collec } // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. - for name, m := range opts.MapReplacements { - spec, ok := coll.Maps[name] - if !ok { + for name := range opts.MapReplacements { + if _, ok := coll.Maps[name]; !ok { return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) } + } - if err := spec.Compatible(m); err != nil { - return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err) - } + if err := populateKallsyms(coll.Programs); err != nil { + return nil, fmt.Errorf("populating kallsyms caches: %w", err) } return &collectionLoader{ @@ -432,9 +436,50 @@ func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collec opts, make(map[string]*Map), make(map[string]*Program), + make(map[string]*Variable), + newBTFCache(&opts.Programs), }, nil } +// populateKallsyms populates kallsyms caches, making lookups cheaper later on +// during individual program loading. Since we have less context available +// at those stages, we batch the lookups here instead to avoid redundant work. +func populateKallsyms(progs map[string]*ProgramSpec) error { + // Look up associated kernel modules for all symbols referenced by + // ProgramSpec.AttachTo for program types that support attaching to kmods. + mods := make(map[string]string) + for _, p := range progs { + if p.AttachTo != "" && p.targetsKernelModule() { + mods[p.AttachTo] = "" + } + } + if len(mods) != 0 { + if err := kallsyms.AssignModules(mods); err != nil { + return fmt.Errorf("getting modules from kallsyms: %w", err) + } + } + + // Look up addresses of all kernel symbols referenced by all programs. + addrs := make(map[string]uint64) + for _, p := range progs { + iter := p.Instructions.Iterate() + for iter.Next() { + ins := iter.Ins + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta != nil { + addrs[meta.Name] = 0 + } + } + } + if len(addrs) != 0 { + if err := kallsyms.AssignAddresses(addrs); err != nil { + return fmt.Errorf("getting addresses from kallsyms: %w", err) + } + } + + return nil +} + // close all resources left over in the collectionLoader. 
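The MapReplacements handling above now only verifies that a replacement name exists in the spec; the compatibility check moves into loadMap so it can account for feature-dependent flags. A hypothetical sketch of the option itself, assuming a map pinned at a made-up bpffs path and a spec map named "events":

```go
package example

import "github.com/cilium/ebpf"

// loadWithSharedMap reuses an already-created map (here loaded from a
// hypothetical pin) for the spec's "events" map instead of creating a new one.
func loadWithSharedMap(spec *ebpf.CollectionSpec) (*ebpf.Collection, error) {
	shared, err := ebpf.LoadPinnedMap("/sys/fs/bpf/shared_events", nil)
	if err != nil {
		return nil, err
	}

	return ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
		MapReplacements: map[string]*ebpf.Map{"events": shared},
	})
}
```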
func (cl *collectionLoader) close() { for _, m := range cl.maps { @@ -455,7 +500,22 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { return nil, fmt.Errorf("missing map %s", mapName) } + mapSpec = mapSpec.Copy() + + // Defer setting the mmapable flag on maps until load time. This avoids the + // MapSpec having different flags on some kernel versions. Also avoid running + // syscalls during ELF loading, so platforms like wasm can also parse an ELF. + if isDataSection(mapSpec.Name) && haveMmapableMaps() == nil { + mapSpec.Flags |= sys.BPF_F_MMAPABLE + } + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Check compatibility with the replacement map after setting + // feature-dependent map flags. + if err := mapSpec.Compatible(replaceMap); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", mapSpec.Name, err) + } + // Clone the map to avoid closing user's map later on. m, err := replaceMap.Clone() if err != nil { @@ -476,6 +536,7 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { // that need to be finalized before invoking the verifier. if !mapSpec.Type.canStoreMapOrProgram() { if err := m.finalize(mapSpec); err != nil { + _ = m.Close() return nil, fmt.Errorf("finalizing map %s: %w", mapName, err) } } @@ -528,7 +589,7 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { } } - prog, err := newProgramWithOptions(progSpec, cl.opts.Programs) + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.types) if err != nil { return nil, fmt.Errorf("program %s: %w", progName, err) } @@ -537,6 +598,63 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { return prog, nil } +func (cl *collectionLoader) loadVariable(varName string) (*Variable, error) { + if v := cl.vars[varName]; v != nil { + return v, nil + } + + varSpec := cl.coll.Variables[varName] + if varSpec == nil { + return nil, fmt.Errorf("unknown variable %s", varName) + } + + // Get the key of the VariableSpec's MapSpec in the CollectionSpec. + var mapName string + for n, ms := range cl.coll.Maps { + if ms == varSpec.m { + mapName = n + break + } + } + if mapName == "" { + return nil, fmt.Errorf("variable %s: underlying MapSpec %s was removed from CollectionSpec", varName, varSpec.m.Name) + } + + m, err := cl.loadMap(mapName) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + // If the kernel is too old or the underlying map was created without + // BPF_F_MMAPABLE, [Map.Memory] will return ErrNotSupported. In this case, + // emit a Variable with a nil Memory. This keeps Collection{Spec}.Variables + // consistent across systems with different feature sets without breaking + // LoadAndAssign. + var mm *Memory + if unsafeMemory { + mm, err = m.unsafeMemory() + } else { + mm, err = m.Memory() + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("variable %s: getting memory for map %s: %w", varName, mapName, err) + } + + v, err := newVariable( + varSpec.name, + varSpec.offset, + varSpec.size, + varSpec.t, + mm, + ) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + cl.vars[varName] = v + return v, nil +} + // populateDeferredMaps iterates maps holding programs or other maps and loads // any dependencies. Populates all maps in cl and freezes them if specified. 
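loadVariable above backs each Variable with the memory of its data-section map and falls back to a nil Memory (so accessors return ErrNotSupported) when the kernel lacks mmapable maps. Before the deferred-map population helper below, a hypothetical sketch of consuming Collection.Variables, with a made-up variable name and assuming the Variable Get accessor:

```go
package example

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// readCounter reads a hypothetical global variable from a loaded Collection.
func readCounter(coll *ebpf.Collection) (uint64, error) {
	v, ok := coll.Variables["packet_count"]
	if !ok {
		return 0, fmt.Errorf("variable packet_count not found")
	}

	var count uint64
	if err := v.Get(&count); err != nil {
		// ErrNotSupported if the kernel can't mmap array maps.
		return 0, err
	}
	return count, nil
}
```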
func (cl *collectionLoader) populateDeferredMaps() error { @@ -601,8 +719,13 @@ func resolveKconfig(m *MapSpec) error { return errors.New("map value is not a Datasec") } + if platform.IsWindows { + return fmt.Errorf(".kconfig: %w", internal.ErrNotSupportedOnOS) + } + type configInfo struct { offset uint32 + size uint32 typ btf.Type } @@ -619,7 +742,7 @@ func resolveKconfig(m *MapSpec) error { return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type) } - kv, err := internal.KernelVersion() + kv, err := linux.KernelVersion() if err != nil { return fmt.Errorf("getting kernel version: %w", err) } @@ -644,6 +767,7 @@ func resolveKconfig(m *MapSpec) error { default: // Catch CONFIG_*. configs[n] = configInfo{ offset: vsi.Offset, + size: vsi.Size, typ: v.Type, } } @@ -651,7 +775,7 @@ func resolveKconfig(m *MapSpec) error { // We only parse kconfig file if a CONFIG_* variable was found. if len(configs) > 0 { - f, err := kconfig.Find() + f, err := linux.FindKConfig() if err != nil { return fmt.Errorf("cannot find a kconfig file: %w", err) } @@ -670,10 +794,10 @@ func resolveKconfig(m *MapSpec) error { for n, info := range configs { value, ok := kernelConfig[n] if !ok { - return fmt.Errorf("config option %q does not exists for this kernel", n) + return fmt.Errorf("config option %q does not exist on this kernel", n) } - err := kconfig.PutValue(data[info.offset:], info.typ, value) + err := kconfig.PutValue(data[info.offset:info.offset+info.size], info.typ, value) if err != nil { return fmt.Errorf("problem adding value for %s: %w", n, err) } @@ -691,6 +815,13 @@ func resolveKconfig(m *MapSpec) error { // Omitting Collection.Close() during application shutdown is an error. // See the package documentation for details around Map and Program lifecycle. func LoadCollection(file string) (*Collection, error) { + if platform.IsWindows { + // This mirrors a check in efW. + if ext := filepath.Ext(file); ext == ".sys" { + return loadCollectionFromNativeImage(file) + } + } + spec, err := LoadCollectionSpec(file) if err != nil { return nil, err @@ -723,6 +854,7 @@ func LoadCollection(file string) (*Collection, error) { func (coll *Collection) Assign(to interface{}) error { assignedMaps := make(map[string]bool) assignedProgs := make(map[string]bool) + assignedVars := make(map[string]bool) // Assign() only transfers already-loaded Maps and Programs. No extra // loading is done. 
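Putting the pieces together: VariableSpec lets constants be set before loading (replacing the deprecated RewriteConstants), and LoadAndAssign can hand out *ebpf.Variable fields via struct tags as described in the Assign docs above. A hypothetical sketch; the object path, program and variable names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

type objects struct {
	Prog *ebpf.Program  `ebpf:"xdp_foo"`
	Hits *ebpf.Variable `ebpf:"hits"`
}

func main() {
	spec, err := ebpf.LoadCollectionSpec("prog.o")
	if err != nil {
		fmt.Println(err)
		return
	}

	// Replaces the deprecated RewriteConstants call.
	if v := spec.Variables["enable_debug"]; v != nil && v.Constant() {
		if err := v.Set(uint32(1)); err != nil {
			fmt.Println(err)
			return
		}
	}

	var objs objects
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		fmt.Println(err)
		return
	}
	defer objs.Prog.Close()

	var hits uint64
	if err := objs.Hits.Get(&hits); err != nil {
		fmt.Println(err) // ErrNotSupported on kernels without mmapable maps.
		return
	}
	fmt.Println("hits =", hits)
}
```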
@@ -743,6 +875,13 @@ func (coll *Collection) Assign(to interface{}) error { } return nil, fmt.Errorf("missing map %q", name) + case reflect.TypeOf((*Variable)(nil)): + if v := coll.Variables[name]; v != nil { + assignedVars[name] = true + return v, nil + } + return nil, fmt.Errorf("missing variable %q", name) + default: return nil, fmt.Errorf("unsupported type %s", typ) } @@ -759,6 +898,9 @@ func (coll *Collection) Assign(to interface{}) error { for m := range assignedMaps { delete(coll.Maps, m) } + for s := range assignedVars { + delete(coll.Variables, s) + } return nil } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/collection_other.go b/src/runtime/vendor/github.com/cilium/ebpf/collection_other.go new file mode 100644 index 000000000000..0e69bb83ac76 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/collection_other.go @@ -0,0 +1,9 @@ +//go:build !windows + +package ebpf + +import "github.com/cilium/ebpf/internal" + +func loadCollectionFromNativeImage(_ string) (*Collection, error) { + return nil, internal.ErrNotSupportedOnOS +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/collection_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/collection_windows.go new file mode 100644 index 000000000000..c1bbaa21d1b6 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/collection_windows.go @@ -0,0 +1,136 @@ +package ebpf + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +func loadCollectionFromNativeImage(file string) (_ *Collection, err error) { + mapFds := make([]efw.FD, 16) + programFds := make([]efw.FD, 16) + var maps map[string]*Map + var programs map[string]*Program + + defer func() { + if err == nil { + return + } + + for _, fd := range append(mapFds, programFds...) { + // efW never uses fd 0. + if fd != 0 { + _ = efw.EbpfCloseFd(int(fd)) + } + } + + for _, m := range maps { + _ = m.Close() + } + + for _, p := range programs { + _ = p.Close() + } + }() + + nMaps, nPrograms, err := efw.EbpfObjectLoadNativeFds(file, mapFds, programFds) + if errors.Is(err, efw.EBPF_NO_MEMORY) && (nMaps > len(mapFds) || nPrograms > len(programFds)) { + mapFds = make([]efw.FD, nMaps) + programFds = make([]efw.FD, nPrograms) + + nMaps, nPrograms, err = efw.EbpfObjectLoadNativeFds(file, mapFds, programFds) + } + if err != nil { + return nil, err + } + + mapFds = mapFds[:nMaps] + programFds = programFds[:nPrograms] + + // The maximum length of a name is only 16 bytes on Linux, longer names + // are truncated. This is not a problem when loading from an ELF, since + // we get the full object name from the symbol table. + // When loading a native image we do not have this luxury. Use an efW native + // API to retrieve up to 64 bytes of the object name. 
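On Windows, the .sys dispatch added to LoadCollection earlier in this file routes native images through loadCollectionFromNativeImage. Before the per-object name handling continues below, a hypothetical sketch of the call site; the file name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	// A ".sys" extension takes the native-image path on Windows; ELF objects
	// still go through LoadCollectionSpec.
	coll, err := ebpf.LoadCollection("bpf_prog.sys")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer coll.Close()

	for name := range coll.Programs {
		fmt.Println("program:", name)
	}
}
```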
+ + maps = make(map[string]*Map, len(mapFds)) + for _, raw := range mapFds { + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + m, mapErr := newMapFromFD(fd) + if mapErr != nil { + _ = fd.Close() + return nil, mapErr + } + + var efwMapInfo efw.BpfMapInfo + size := uint32(unsafe.Sizeof(efwMapInfo)) + _, err = efw.EbpfObjectGetInfoByFd(m.FD(), unsafe.Pointer(&efwMapInfo), &size) + if err != nil { + _ = m.Close() + return nil, err + } + + if size >= uint32(unsafe.Offsetof(efwMapInfo.Name)+unsafe.Sizeof(efwMapInfo.Name)) { + m.name = unix.ByteSliceToString(efwMapInfo.Name[:]) + } + + if m.name == "" { + _ = m.Close() + return nil, fmt.Errorf("unnamed map") + } + + if _, ok := maps[m.name]; ok { + return nil, fmt.Errorf("duplicate map with the same name: %s", m.name) + } + + maps[m.name] = m + } + + programs = make(map[string]*Program, len(programFds)) + for _, raw := range programFds { + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + program, err := newProgramFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + var efwProgInfo efw.BpfProgInfo + size := uint32(unsafe.Sizeof(efwProgInfo)) + _, err = efw.EbpfObjectGetInfoByFd(program.FD(), unsafe.Pointer(&efwProgInfo), &size) + if err != nil { + _ = program.Close() + return nil, err + } + + if size >= uint32(unsafe.Offsetof(efwProgInfo.Name)+unsafe.Sizeof(efwProgInfo.Name)) { + program.name = unix.ByteSliceToString(efwProgInfo.Name[:]) + } + + if program.name == "" { + _ = program.Close() + return nil, fmt.Errorf("unnamed program") + } + + if _, ok := programs[program.name]; ok { + _ = program.Close() + return nil, fmt.Errorf("duplicate program with the same name: %s", program.name) + } + + programs[program.name] = program + } + + return &Collection{programs, maps, nil}, nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/cpu.go b/src/runtime/vendor/github.com/cilium/ebpf/cpu.go index 07e959efdcb2..3bcdc386db55 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/cpu.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/cpu.go @@ -1,16 +1,5 @@ package ebpf -import ( - "fmt" - "os" - "strings" - "sync" -) - -var possibleCPU = sync.OnceValues(func() (int, error) { - return parseCPUsFromFile("/sys/devices/system/cpu/possible") -}) - // PossibleCPU returns the max number of CPUs a system may possibly have // Logical CPU numbers must be of the form 0-n func PossibleCPU() (int, error) { @@ -26,41 +15,3 @@ func MustPossibleCPU() int { } return cpus } - -func parseCPUsFromFile(path string) (int, error) { - spec, err := os.ReadFile(path) - if err != nil { - return 0, err - } - - n, err := parseCPUs(string(spec)) - if err != nil { - return 0, fmt.Errorf("can't parse %s: %v", path, err) - } - - return n, nil -} - -// parseCPUs parses the number of cpus from a string produced -// by bitmap_list_string() in the Linux kernel. -// Multiple ranges are rejected, since they can't be unified -// into a single number. -// This is the format of /sys/devices/system/cpu/possible, it -// is not suitable for /sys/devices/system/cpu/online, etc. 
-func parseCPUs(spec string) (int, error) { - if strings.Trim(spec, "\n") == "0" { - return 1, nil - } - - var low, high int - n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high) - if n != 2 || err != nil { - return 0, fmt.Errorf("invalid format: %s", spec) - } - if low != 0 { - return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec) - } - - // cpus is 0 indexed - return high + 1, nil -} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/cpu_other.go b/src/runtime/vendor/github.com/cilium/ebpf/cpu_other.go new file mode 100644 index 000000000000..eca5164c1289 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/cpu_other.go @@ -0,0 +1,13 @@ +//go:build !windows + +package ebpf + +import ( + "sync" + + "github.com/cilium/ebpf/internal/linux" +) + +var possibleCPU = sync.OnceValues(func() (int, error) { + return linux.ParseCPUsFromFile("/sys/devices/system/cpu/possible") +}) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/cpu_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/cpu_windows.go new file mode 100644 index 000000000000..9448b0916416 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/cpu_windows.go @@ -0,0 +1,11 @@ +package ebpf + +import ( + "sync" + + "golang.org/x/sys/windows" +) + +var possibleCPU = sync.OnceValues(func() (int, error) { + return int(windows.GetMaximumProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil +}) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/elf_reader.go b/src/runtime/vendor/github.com/cilium/ebpf/elf_reader.go index 620037d80a8a..e2b21fa57ce1 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/elf_reader.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/elf_reader.go @@ -10,13 +10,14 @@ import ( "io" "math" "os" + "slices" "strings" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" - "github.com/cilium/ebpf/internal/unix" ) type kconfigMetaKey struct{} @@ -33,6 +34,13 @@ type kfuncMeta struct { Func *btf.Func } +type ksymMetaKey struct{} + +type ksymMeta struct { + Binding elf.SymBind + Name string +} + // elfCode is a convenience to reduce the amount of arguments that have to // be passed around explicitly. You should treat its contents as immutable. type elfCode struct { @@ -43,7 +51,9 @@ type elfCode struct { btf *btf.Spec extInfo *btf.ExtInfos maps map[string]*MapSpec + vars map[string]*VariableSpec kfuncs map[string]*btf.Func + ksyms map[string]struct{} kconfig *MapSpec } @@ -71,8 +81,8 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { // Checks if the ELF file is for BPF data. // Old LLVM versions set e_machine to EM_NONE. 
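The parseCPUs helper removed above moves to internal/linux (ParseCPUsFromFile), keeping the same format rules: a single zero-based range such as "0-7", or the literal "0". Before the ELF machine check below, a standalone sketch of that parsing:

```go
package main

import (
	"fmt"
	"strings"
)

// parsePossibleCPUs mirrors the removed parseCPUs: it accepts the format of
// /sys/devices/system/cpu/possible (a single range starting at zero).
func parsePossibleCPUs(spec string) (int, error) {
	if strings.Trim(spec, "\n") == "0" {
		return 1, nil
	}

	var low, high int
	n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
	if n != 2 || err != nil {
		return 0, fmt.Errorf("invalid format: %q", spec)
	}
	if low != 0 {
		return 0, fmt.Errorf("CPU range doesn't start at zero: %q", spec)
	}

	// CPU numbering is zero-based.
	return high + 1, nil
}

func main() {
	n, _ := parsePossibleCPUs("0-7\n")
	fmt.Println(n) // 8
}
```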
- if f.File.Machine != unix.EM_NONE && f.File.Machine != elf.EM_BPF { - return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine) + if f.Machine != elf.EM_NONE && f.Machine != elf.EM_BPF { + return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.Machine) } var ( @@ -101,7 +111,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { sections[idx] = newElfSection(sec, mapSection) case sec.Name == ".maps": sections[idx] = newElfSection(sec, btfMapSection) - case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"): + case isDataSection(sec.Name): sections[idx] = newElfSection(sec, dataSection) case sec.Type == elf.SHT_REL: // Store relocations under the section index of the target @@ -134,7 +144,9 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { btf: btfSpec, extInfo: btfExtInfo, maps: make(map[string]*MapSpec), + vars: make(map[string]*VariableSpec), kfuncs: make(map[string]*btf.Func), + ksyms: make(map[string]struct{}), } symbols, err := f.Symbols() @@ -174,7 +186,13 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { return nil, fmt.Errorf("load programs: %w", err) } - return &CollectionSpec{ec.maps, progs, btfSpec, ec.ByteOrder}, nil + return &CollectionSpec{ + ec.maps, + progs, + ec.vars, + btfSpec, + ec.ByteOrder, + }, nil } func loadLicense(sec *elf.Section) (string, error) { @@ -201,6 +219,18 @@ func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { return version, nil } +func isDataSection(name string) bool { + return name == ".bss" || strings.HasPrefix(name, ".data") || strings.HasPrefix(name, ".rodata") +} + +func isConstantDataSection(name string) bool { + return strings.HasPrefix(name, ".rodata") +} + +func isKconfigSection(name string) bool { + return name == ".kconfig" +} + type elfSectionKind int const ( @@ -380,7 +410,8 @@ func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructio // Decode the section's instruction stream. insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) - if err := insns.Unmarshal(r, ec.ByteOrder); err != nil { + insns, err := asm.AppendInstructions(insns, r, ec.ByteOrder, platform.Linux) + if err != nil { return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) } if len(insns) == 0 { @@ -506,7 +537,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err case elf.STT_OBJECT: // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. - if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL { + if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL && bind != elf.STB_WEAK { return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) } @@ -614,6 +645,8 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err } kf := ec.kfuncs[name] + _, ks := ec.ksyms[name] + switch { // If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name // that matches the symbol name we mark the instruction as a referencing a kfunc. 
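The new isDataSection/isConstantDataSection helpers above widen data-section handling from the fixed ".bss"/".data" names to any ".data*" section while keeping the ".rodata*" prefix match. A standalone sketch of the classification, using local copies of the unexported helpers:

```go
package main

import (
	"fmt"
	"strings"
)

func isDataSection(name string) bool {
	return name == ".bss" || strings.HasPrefix(name, ".data") || strings.HasPrefix(name, ".rodata")
}

func isConstantDataSection(name string) bool {
	return strings.HasPrefix(name, ".rodata")
}

func main() {
	for _, name := range []string{".bss", ".data.foo", ".rodata.str1.1", ".maps", ".kconfig"} {
		fmt.Printf("%-15s data=%-5v constant=%v\n", name, isDataSection(name), isConstantDataSection(name))
	}
}
```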
@@ -634,6 +667,15 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err ins.Constant = 0 + case ks && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + ins.Metadata.Set(ksymMetaKey{}, &ksymMeta{ + Binding: bind, + Name: name, + }) + // If no kconfig map is found, this must be a symbol reference from inline // asm (see testdata/loader.c:asm_relocation()) or a call to a forward // function declaration (see testdata/fwd_decl.c). Don't interfere, These @@ -683,6 +725,22 @@ func (ec *elfCode) loadMaps() error { return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) } + // If the ELF has BTF, pull out the btf.Var for each map definition to + // extract decl tags from. + varsByName := make(map[string]*btf.Var) + if ec.btf != nil { + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err == nil { + for _, vsi := range ds.Vars { + v, ok := btf.As[*btf.Var](vsi.Type) + if !ok { + return fmt.Errorf("section %v: btf.VarSecInfo doesn't point to a *btf.Var: %T", sec.Name, vsi.Type) + } + varsByName[string(v.Name)] = v + } + } + } + var ( r = bufio.NewReader(sec.Open()) size = sec.Size / uint64(nSym) @@ -701,7 +759,7 @@ func (ec *elfCode) loadMaps() error { lr := io.LimitReader(r, int64(size)) spec := MapSpec{ - Name: SanitizeName(mapName, -1), + Name: sanitizeName(mapName, -1), } switch { case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: @@ -724,6 +782,10 @@ func (ec *elfCode) loadMaps() error { spec.Extra = bytes.NewReader(extra) } + if v, ok := varsByName[mapName]; ok { + spec.Tags = slices.Clone(v.Tags) + } + ec.maps[mapName] = &spec } } @@ -980,8 +1042,20 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b } } + // Some maps don't support value sizes, but annotating their map definitions + // with __type macros can still be useful, especially to let bpf2go generate + // type definitions for them. + if value != nil && !mapType.canHaveValueSize() { + valueSize = 0 + } + + v, ok := btf.As[*btf.Var](vs.Type) + if !ok { + return nil, fmt.Errorf("BTF map definition: btf.VarSecInfo doesn't point to a *btf.Var: %T", vs.Type) + } + return &MapSpec{ - Name: SanitizeName(name, -1), + Name: sanitizeName(name, -1), Type: MapType(mapType), KeySize: keySize, ValueSize: valueSize, @@ -992,6 +1066,7 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b Pinning: pinType, InnerMap: innerMapSpec, Contents: contents, + Tags: slices.Clone(v.Tags), }, nil } @@ -1047,7 +1122,7 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem end := vs.Size + vs.Offset // The size of an address in this section. This determines the width of // an index in the array. - align := uint32(es.SectionHeader.Addralign) + align := uint32(es.Addralign) // Check if variable-length section is aligned. if (end-start)%align != 0 { @@ -1092,20 +1167,33 @@ func (ec *elfCode) loadDataSections() error { continue } - if sec.references == 0 { - // Prune data sections which are not referenced by any - // instructions. + // If a section has no references, it will be freed as soon as the + // Collection closes, so creating and populating it is wasteful. If it has + // no symbols, it is likely an ephemeral section used during compilation + // that wasn't sanitized by the bpf linker. 
(like .rodata.str1.1) + // + // No symbols means no VariableSpecs can be generated from it, making it + // pointless to emit a data section for. + if sec.references == 0 && len(sec.symbols) == 0 { continue } + if sec.Size > math.MaxUint32 { + return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) + } + mapSpec := &MapSpec{ - Name: SanitizeName(sec.Name, -1), + Name: sanitizeName(sec.Name, -1), Type: Array, KeySize: 4, ValueSize: uint32(sec.Size), MaxEntries: 1, } + if isConstantDataSection(sec.Name) { + mapSpec.Flags = sys.BPF_F_RDONLY_PROG + } + switch sec.Type { // Only open the section if we know there's actual data to be read. case elf.SHT_PROGBITS: @@ -1113,20 +1201,56 @@ func (ec *elfCode) loadDataSections() error { if err != nil { return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) } - - if uint64(len(data)) > math.MaxUint32 { - return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) - } mapSpec.Contents = []MapKV{{uint32(0), data}} case elf.SHT_NOBITS: - // NOBITS sections like .bss contain only zeroes, and since data sections - // are Arrays, the kernel already preallocates them. Skip reading zeroes - // from the ELF. + // NOBITS sections like .bss contain only zeroes and are not allocated in + // the ELF. Since data sections are Arrays, the kernel can preallocate + // them. Don't attempt reading zeroes from the ELF, instead allocate the + // zeroed memory to support getting and setting VariableSpecs for sections + // like .bss. + mapSpec.Contents = []MapKV{{uint32(0), make([]byte, sec.Size)}} + default: return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type) } + for off, sym := range sec.symbols { + // Skip symbols marked with the 'hidden' attribute. + if elf.ST_VISIBILITY(sym.Other) == elf.STV_HIDDEN || + elf.ST_VISIBILITY(sym.Other) == elf.STV_INTERNAL { + continue + } + + // Only accept symbols with global or weak bindings. The common + // alternative is STB_LOCAL, which are either function-scoped or declared + // 'static'. + if elf.ST_BIND(sym.Info) != elf.STB_GLOBAL && + elf.ST_BIND(sym.Info) != elf.STB_WEAK { + continue + } + + if ec.vars[sym.Name] != nil { + return fmt.Errorf("data section %s: duplicate variable %s", sec.Name, sym.Name) + } + + // Skip symbols starting with a dot, they are compiler-internal symbols + // emitted by clang 11 and earlier and are not cleaned up by the bpf + // compiler backend (e.g. symbols named .Lconstinit.1 in sections like + // .rodata.cst32). Variables in C cannot start with a dot, so filter these + // out. + if strings.HasPrefix(sym.Name, ".") { + continue + } + + ec.vars[sym.Name] = &VariableSpec{ + name: sym.Name, + offset: off, + size: sym.Size, + m: mapSpec, + } + } + // It is possible for a data section to exist without a corresponding BTF Datasec // if it only contains anonymous values like macro-defined arrays. if ec.btf != nil { @@ -1135,12 +1259,38 @@ func (ec *elfCode) loadDataSections() error { // Assign the spec's key and BTF only if the Datasec lookup was successful. mapSpec.Key = &btf.Void{} mapSpec.Value = ds - } - } - if strings.HasPrefix(sec.Name, ".rodata") { - mapSpec.Flags = unix.BPF_F_RDONLY_PROG - mapSpec.Freeze = true + // Populate VariableSpecs with type information, if available. 
+ for _, v := range ds.Vars { + name := v.Type.TypeName() + if name == "" { + return fmt.Errorf("data section %s: anonymous variable %v", sec.Name, v) + } + + vt, ok := v.Type.(*btf.Var) + if !ok { + return fmt.Errorf("data section %s: unexpected type %T for variable %s", sec.Name, v.Type, name) + } + + ev := ec.vars[name] + if ev == nil { + // Hidden symbols appear in the BTF Datasec but don't receive a VariableSpec. + continue + } + + if uint64(v.Offset) != ev.offset { + return fmt.Errorf("data section %s: variable %s datasec offset (%d) doesn't match ELF symbol offset (%d)", sec.Name, name, v.Offset, ev.offset) + } + + if uint64(v.Size) != ev.size { + return fmt.Errorf("data section %s: variable %s size in datasec (%d) doesn't match ELF symbol size (%d)", sec.Name, name, v.Size, ev.size) + } + + // Decouple the Var in the VariableSpec from the underlying DataSec in + // the MapSpec to avoid modifications from affecting map loads later on. + ev.t = btf.Copy(vt).(*btf.Var) + } + } } ec.maps[sec.Name] = mapSpec @@ -1175,8 +1325,7 @@ func (ec *elfCode) loadKconfigSection() error { KeySize: uint32(4), ValueSize: ds.Size, MaxEntries: 1, - Flags: unix.BPF_F_RDONLY_PROG, - Freeze: true, + Flags: sys.BPF_F_RDONLY_PROG, Key: &btf.Int{Size: 4}, Value: ds, } @@ -1201,8 +1350,14 @@ func (ec *elfCode) loadKsymsSection() error { } for _, v := range ds.Vars { - // we have already checked the .ksyms Datasec to only contain Func Vars. - ec.kfuncs[v.Type.TypeName()] = v.Type.(*btf.Func) + switch t := v.Type.(type) { + case *btf.Func: + ec.kfuncs[t.TypeName()] = t + case *btf.Var: + ec.ksyms[t.TypeName()] = struct{}{} + default: + return fmt.Errorf("unexpected variable type in .ksyms: %T", v) + } } return nil @@ -1266,10 +1421,10 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { var flags uint32 if t.flags&_SEC_SLEEPABLE > 0 { - flags |= unix.BPF_F_SLEEPABLE + flags |= sys.BPF_F_SLEEPABLE } if t.flags&_SEC_XDP_FRAGS > 0 { - flags |= unix.BPF_F_XDP_HAS_FRAGS + flags |= sys.BPF_F_XDP_HAS_FRAGS } if t.flags&_SEC_EXP_ATTACH_OPT > 0 { if programType == XDP { diff --git a/src/runtime/vendor/github.com/cilium/ebpf/elf_sections.go b/src/runtime/vendor/github.com/cilium/ebpf/elf_sections.go index 4b58251d9ab4..43dcfb103ee0 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/elf_sections.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/elf_sections.go @@ -18,6 +18,7 @@ var elfSectionDefs = []libbpfElfSectionDef{ {"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, {"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, {"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kprobe.session+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_SESSION, _SEC_NONE}, {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, @@ -69,6 +70,7 @@ var elfSectionDefs = []libbpfElfSectionDef{ {"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT}, {"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT}, {"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb/verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_VERDICT, 
_SEC_ATTACHABLE_OPT}, {"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE}, {"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT}, {"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT}, diff --git a/src/runtime/vendor/github.com/cilium/ebpf/info.go b/src/runtime/vendor/github.com/cilium/ebpf/info.go index 04c60c64b893..b15202a0f5c5 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/info.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/info.go @@ -8,14 +8,13 @@ import ( "fmt" "io" "os" - "strings" - "syscall" + "reflect" "time" - "unsafe" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -39,53 +38,131 @@ import ( // MapInfo describes a map. type MapInfo struct { - Type MapType - id MapID - KeySize uint32 - ValueSize uint32 + // Type of the map. + Type MapType + // KeySize is the size of the map key in bytes. + KeySize uint32 + // ValueSize is the size of the map value in bytes. + ValueSize uint32 + // MaxEntries is the maximum number of entries the map can hold. Its meaning + // is map-specific. MaxEntries uint32 - Flags uint32 + // Flags used during map creation. + Flags uint32 // Name as supplied by user space at load time. Available from 4.15. Name string - btf btf.ID + id MapID + btf btf.ID + mapExtra uint64 + memlock uint64 + frozen bool } -func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { +// minimalMapInfoFromFd queries the minimum information needed to create a Map +// based on a file descriptor. This requires the map type, key/value sizes, +// maxentries and flags. +// +// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and +// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov +// 2017. +// +// Requires at least Linux 4.13. +func minimalMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { var info sys.MapInfo - err := sys.ObjInfo(fd, &info) - if errors.Is(err, syscall.EINVAL) { - return newMapInfoFromProc(fd) + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting object info: %w", err) } + + typ, err := MapTypeForPlatform(platform.Native, info.Type) if err != nil { - return nil, err + return nil, fmt.Errorf("map type: %w", err) } return &MapInfo{ - MapType(info.Type), - MapID(info.Id), + Type: typ, + KeySize: info.KeySize, + ValueSize: info.ValueSize, + MaxEntries: info.MaxEntries, + Flags: uint32(info.MapFlags), + Name: unix.ByteSliceToString(info.Name[:]), + }, nil +} + +// newMapInfoFromFd queries map information about the given fd. [sys.ObjInfo] is +// attempted first, supplementing any missing values with information from +// /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as ErrNotSupported +// from reading fdinfo (indicating the file exists, but no fields of interest +// were found). If both fail, an error is always returned. +func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err1 := sys.ObjInfo(fd, &info) + // EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue + // with fdinfo if that's the case. 
+ if err1 != nil && !errors.Is(err1, unix.EINVAL) { + return nil, fmt.Errorf("getting object info: %w", err1) + } + + typ, err := MapTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("map type: %w", err) + } + + mi := &MapInfo{ + typ, info.KeySize, info.ValueSize, info.MaxEntries, uint32(info.MapFlags), unix.ByteSliceToString(info.Name[:]), + MapID(info.Id), btf.ID(info.BtfId), - }, nil + info.MapExtra, + 0, + false, + } + + // Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields + // like memlock and frozen that are not present in OBJ_INFO. + err2 := readMapInfoFromProc(fd, mi) + if err2 != nil && !errors.Is(err2, ErrNotSupported) { + return nil, fmt.Errorf("getting map info from fdinfo: %w", err2) + } + + if err1 != nil && err2 != nil { + return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2) + } + + return mi, nil } -func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) { - var mi MapInfo +// readMapInfoFromProc queries map information about the given fd from +// /proc/self/fdinfo. It only writes data into fields that have a zero value. +func readMapInfoFromProc(fd *sys.FD, mi *MapInfo) error { + var mapType uint32 err := scanFdInfo(fd, map[string]interface{}{ - "map_type": &mi.Type, + "map_type": &mapType, + "map_id": &mi.id, "key_size": &mi.KeySize, "value_size": &mi.ValueSize, "max_entries": &mi.MaxEntries, "map_flags": &mi.Flags, + "map_extra": &mi.mapExtra, + "memlock": &mi.memlock, + "frozen": &mi.frozen, }) if err != nil { - return nil, err + return err } - return &mi, nil + + if mi.Type == 0 { + mi.Type, err = MapTypeForPlatform(platform.Linux, mapType) + if err != nil { + return fmt.Errorf("map type: %w", err) + } + } + + return nil } // ID returns the map ID. @@ -109,18 +186,108 @@ func (mi *MapInfo) BTFID() (btf.ID, bool) { return mi.btf, mi.btf > 0 } -// programStats holds statistics of a program. -type programStats struct { - // Total accumulated runtime of the program ins ns. - runtime time.Duration - // Total number of times the program was called. - runCount uint64 - // Total number of times the programm was NOT called. - // Added in commit 9ed9e9ba2337 ("bpf: Count the number of times recursion was prevented"). - recursionMisses uint64 +// MapExtra returns an opaque field whose meaning is map-specific. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available and +// populated, if it was specified during Map creation. +func (mi *MapInfo) MapExtra() (uint64, bool) { + return mi.mapExtra, mi.mapExtra > 0 } -// ProgramInfo describes a program. +// Memlock returns an approximate number of bytes allocated to this map. +// +// Available from 4.10. +// +// The bool return value indicates whether this optional field is available. +func (mi *MapInfo) Memlock() (uint64, bool) { + return mi.memlock, mi.memlock > 0 +} + +// Frozen indicates whether [Map.Freeze] was called on this map. If true, +// modifications from user space are not allowed. +// +// Available from 5.2. Requires access to procfs. +// +// If the kernel doesn't support map freezing, this field will always be false. +func (mi *MapInfo) Frozen() bool { + return mi.frozen +} + +// ProgramStats contains runtime statistics for a single [Program], returned by +// [Program.Stats]. +// +// Will contain mostly zero values if the collection of statistics is not +// enabled, see [EnableStats]. +type ProgramStats struct { + // Total accumulated runtime of the Program. 
+ // + // Requires at least Linux 5.8. + Runtime time.Duration + + // Total number of times the Program has executed. + // + // Requires at least Linux 5.8. + RunCount uint64 + + // Total number of times the program was not executed due to recursion. This + // can happen when another bpf program is already running on the cpu, when bpf + // program execution is interrupted, for example. + // + // Requires at least Linux 5.12. + RecursionMisses uint64 +} + +func newProgramStatsFromFd(fd *sys.FD) (*ProgramStats, error) { + var info sys.ProgInfo + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting program info: %w", err) + } + + return &ProgramStats{ + Runtime: time.Duration(info.RunTimeNs), + RunCount: info.RunCnt, + RecursionMisses: info.RecursionMisses, + }, nil +} + +// programJitedInfo holds information about JITed info of a program. +type programJitedInfo struct { + // ksyms holds the ksym addresses of the BPF program, including those of its + // subprograms. + // + // Available from 4.18. + ksyms []uint64 + numKsyms uint32 + + // insns holds the JITed machine native instructions of the program, + // including those of its subprograms. + // + // Available from 4.13. + insns []byte + numInsns uint32 + + // lineInfos holds the JITed line infos, which are kernel addresses. + // + // Available from 5.0. + lineInfos []uint64 + numLineInfos uint32 + + // lineInfoRecSize is the size of a single line info record. + // + // Available from 5.0. + lineInfoRecSize uint32 + + // funcLens holds the insns length of each function. + // + // Available from 4.18. + funcLens []uint32 + numFuncLens uint32 +} + +// ProgramInfo describes a Program's immutable metadata. For runtime statistics, +// see [ProgramStats]. type ProgramInfo struct { Type ProgramType id ProgramID @@ -132,38 +299,93 @@ type ProgramInfo struct { createdByUID uint32 haveCreatedByUID bool btf btf.ID - stats *programStats + loadTime time.Duration - maps []MapID - insns []byte + maps []MapID + insns []byte + jitedSize uint32 + verifiedInstructions uint32 + + jitedInfo programJitedInfo lineInfos []byte numLineInfos uint32 funcInfos []byte numFuncInfos uint32 + + memlock uint64 } +// minimalProgramFromFd queries the minimum information needed to create a +// Program based on a file descriptor, requiring at least the program type. +// +// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and +// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov +// 2017. +// +// Requires at least Linux 4.13. +func minimalProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting object info: %w", err) + } + + typ, err := ProgramTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("program type: %w", err) + } + + return &ProgramInfo{ + Type: typ, + Name: unix.ByteSliceToString(info.Name[:]), + }, nil +} + +// newProgramInfoFromFd queries program information about the given fd. +// +// [sys.ObjInfo] is attempted first, supplementing any missing values with +// information from /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as +// ErrNotSupported from reading fdinfo (indicating the file exists, but no +// fields of interest were found). If both fail, an error is always returned. 
func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { var info sys.ProgInfo - err := sys.ObjInfo(fd, &info) - if errors.Is(err, syscall.EINVAL) { - return newProgramInfoFromProc(fd) + err1 := sys.ObjInfo(fd, &info) + // EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue + // with fdinfo if that's the case. + if err1 != nil && !errors.Is(err1, unix.EINVAL) { + return nil, fmt.Errorf("getting object info: %w", err1) } + + typ, err := ProgramTypeForPlatform(platform.Native, info.Type) if err != nil { - return nil, err + return nil, fmt.Errorf("program type: %w", err) } pi := ProgramInfo{ - Type: ProgramType(info.Type), - id: ProgramID(info.Id), - Tag: hex.EncodeToString(info.Tag[:]), - Name: unix.ByteSliceToString(info.Name[:]), - btf: btf.ID(info.BtfId), - stats: &programStats{ - runtime: time.Duration(info.RunTimeNs), - runCount: info.RunCnt, - recursionMisses: info.RecursionMisses, - }, + Type: typ, + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), + jitedSize: info.JitedProgLen, + loadTime: time.Duration(info.LoadTime), + verifiedInstructions: info.VerifiedInsns, + } + + // Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields + // like memlock that is not present in OBJ_INFO. + err2 := readProgramInfoFromProc(fd, &pi) + if err2 != nil && !errors.Is(err2, ErrNotSupported) { + return nil, fmt.Errorf("getting map info from fdinfo: %w", err2) + } + + if err1 != nil && err2 != nil { + return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2) + } + + if platform.IsWindows && info.Tag == [8]uint8{} { + // Windows doesn't support the tag field, clear it for now. + pi.Tag = "" } // Start with a clean struct for the second call, otherwise we may get EFAULT. @@ -174,7 +396,7 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { if info.NrMapIds > 0 { pi.maps = make([]MapID, info.NrMapIds) info2.NrMapIds = info.NrMapIds - info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0])) + info2.MapIds = sys.SlicePointer(pi.maps) makeSecondCall = true } else if haveProgramInfoMapIDs() == nil { // This program really has no associated maps. @@ -185,7 +407,7 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { } // createdByUID and NrMapIds were introduced in the same kernel version. 
- if pi.maps != nil { + if pi.maps != nil && platform.IsLinux { pi.createdByUID = info.CreatedByUid pi.haveCreatedByUID = true } @@ -193,13 +415,13 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { if info.XlatedProgLen > 0 { pi.insns = make([]byte, info.XlatedProgLen) info2.XlatedProgLen = info.XlatedProgLen - info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + info2.XlatedProgInsns = sys.SlicePointer(pi.insns) makeSecondCall = true } if info.NrLineInfo > 0 { pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo) - info2.LineInfo = sys.NewSlicePointer(pi.lineInfos) + info2.LineInfo = sys.SlicePointer(pi.lineInfos) info2.LineInfoRecSize = btf.LineInfoSize info2.NrLineInfo = info.NrLineInfo pi.numLineInfos = info.NrLineInfo @@ -208,13 +430,47 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { if info.NrFuncInfo > 0 { pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo) - info2.FuncInfo = sys.NewSlicePointer(pi.funcInfos) + info2.FuncInfo = sys.SlicePointer(pi.funcInfos) info2.FuncInfoRecSize = btf.FuncInfoSize info2.NrFuncInfo = info.NrFuncInfo pi.numFuncInfos = info.NrFuncInfo makeSecondCall = true } + pi.jitedInfo.lineInfoRecSize = info.JitedLineInfoRecSize + if info.JitedProgLen > 0 { + pi.jitedInfo.numInsns = info.JitedProgLen + pi.jitedInfo.insns = make([]byte, info.JitedProgLen) + info2.JitedProgLen = info.JitedProgLen + info2.JitedProgInsns = sys.SlicePointer(pi.jitedInfo.insns) + makeSecondCall = true + } + + if info.NrJitedFuncLens > 0 { + pi.jitedInfo.numFuncLens = info.NrJitedFuncLens + pi.jitedInfo.funcLens = make([]uint32, info.NrJitedFuncLens) + info2.NrJitedFuncLens = info.NrJitedFuncLens + info2.JitedFuncLens = sys.SlicePointer(pi.jitedInfo.funcLens) + makeSecondCall = true + } + + if info.NrJitedLineInfo > 0 { + pi.jitedInfo.numLineInfos = info.NrJitedLineInfo + pi.jitedInfo.lineInfos = make([]uint64, info.NrJitedLineInfo) + info2.NrJitedLineInfo = info.NrJitedLineInfo + info2.JitedLineInfo = sys.SlicePointer(pi.jitedInfo.lineInfos) + info2.JitedLineInfoRecSize = info.JitedLineInfoRecSize + makeSecondCall = true + } + + if info.NrJitedKsyms > 0 { + pi.jitedInfo.numKsyms = info.NrJitedKsyms + pi.jitedInfo.ksyms = make([]uint64, info.NrJitedKsyms) + info2.JitedKsyms = sys.SlicePointer(pi.jitedInfo.ksyms) + info2.NrJitedKsyms = info.NrJitedKsyms + makeSecondCall = true + } + if makeSecondCall { if err := sys.ObjInfo(fd, &info2); err != nil { return nil, err @@ -224,23 +480,29 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { return &pi, nil } -func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { - var info ProgramInfo +func readProgramInfoFromProc(fd *sys.FD, pi *ProgramInfo) error { + var progType uint32 err := scanFdInfo(fd, map[string]interface{}{ - "prog_type": &info.Type, - "prog_tag": &info.Tag, + "prog_type": &progType, + "prog_tag": &pi.Tag, + "memlock": &pi.memlock, }) - if errors.Is(err, errMissingFields) { - return nil, &internal.UnsupportedFeatureError{ + if errors.Is(err, ErrNotSupported) && !errors.Is(err, internal.ErrNotSupportedOnOS) { + return &internal.UnsupportedFeatureError{ Name: "reading program info from /proc/self/fdinfo", MinimumVersion: internal.Version{4, 10, 0}, } } if err != nil { - return nil, err + return err + } + + pi.Type, err = ProgramTypeForPlatform(platform.Linux, progType) + if err != nil { + return fmt.Errorf("program type: %w", err) } - return &info, nil + return nil } // ID returns the program ID. 
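The hunks above replace the private programStats struct with the exported ProgramStats type and fill it via newProgramStatsFromFd. As a rough usage sketch for the bumped library, assuming an already-loaded *ebpf.Program named prog and the BPF_STATS_RUN_TIME constant from golang.org/x/sys/unix (the prog variable, package name and that constant are assumptions, not part of this diff):

    package bpfstats // illustrative package name

    import (
        "fmt"

        "github.com/cilium/ebpf"
        "golang.org/x/sys/unix"
    )

    // readStats enables kernel-side statistics collection (Linux 5.8+) and
    // prints the counters exposed by the new ProgramStats type.
    func readStats(prog *ebpf.Program) error {
        closer, err := ebpf.EnableStats(uint32(unix.BPF_STATS_RUN_TIME))
        if err != nil {
            return err
        }
        defer closer.Close()

        // Program.Stats is the accessor referenced by the ProgramStats doc comment above.
        stats, err := prog.Stats()
        if err != nil {
            return err
        }
        fmt.Println(stats.RunCount, stats.Runtime, stats.RecursionMisses)
        return nil
    }

The same pattern applies to the new MapInfo accessors (Memlock, MapExtra, Frozen), which likewise return zero values plus a false/ok indicator when the kernel or fdinfo does not provide the field.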
@@ -273,36 +535,50 @@ func (pi *ProgramInfo) BTFID() (btf.ID, bool) { return pi.btf, pi.btf > 0 } -// RunCount returns the total number of times the program was called. -// -// Can return 0 if the collection of statistics is not enabled. See EnableStats(). -// The bool return value indicates whether this optional field is available. -func (pi *ProgramInfo) RunCount() (uint64, bool) { - if pi.stats != nil { - return pi.stats.runCount, true +// btfSpec returns the BTF spec associated with the program. +func (pi *ProgramInfo) btfSpec() (*btf.Spec, error) { + id, ok := pi.BTFID() + if !ok { + return nil, fmt.Errorf("program created without BTF or unsupported kernel: %w", ErrNotSupported) + } + + h, err := btf.NewHandleFromID(id) + if err != nil { + return nil, fmt.Errorf("get BTF handle: %w", err) + } + defer h.Close() + + spec, err := h.Spec(nil) + if err != nil { + return nil, fmt.Errorf("get BTF spec: %w", err) } - return 0, false + + return spec, nil } -// Runtime returns the total accumulated runtime of the program. +// LineInfos returns the BTF line information of the program. // -// Can return 0 if the collection of statistics is not enabled. See EnableStats(). -// The bool return value indicates whether this optional field is available. -func (pi *ProgramInfo) Runtime() (time.Duration, bool) { - if pi.stats != nil { - return pi.stats.runtime, true +// Available from 5.0. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. +func (pi *ProgramInfo) LineInfos() (btf.LineOffsets, error) { + if len(pi.lineInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) } - return time.Duration(0), false -} -// RecursionMisses returns the total number of times the program was NOT called. -// This can happen when another bpf program is already running on the cpu, which -// is likely to happen for example when you interrupt bpf program execution. -func (pi *ProgramInfo) RecursionMisses() (uint64, bool) { - if pi.stats != nil { - return pi.stats.recursionMisses, true + spec, err := pi.btfSpec() + if err != nil { + return nil, err } - return 0, false + + return btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) } // Instructions returns the 'xlated' instruction stream of the program @@ -326,6 +602,10 @@ func (pi *ProgramInfo) RecursionMisses() (uint64, bool) { // Available from 4.13. Requires CAP_BPF or equivalent for plain instructions. // Requires CAP_SYS_ADMIN for instructions with metadata. func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + if platform.IsWindows && len(pi.insns) == 0 { + return nil, fmt.Errorf("read instructions: %w", internal.ErrNotSupportedOnOS) + } + // If the calling process is not BPF-capable or if the kernel doesn't // support getting xlated instructions, the field will be zero. 
if len(pi.insns) == 0 { @@ -333,8 +613,8 @@ func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { } r := bytes.NewReader(pi.insns) - var insns asm.Instructions - if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + insns, err := asm.AppendInstructions(nil, r, internal.NativeEndian, platform.Native) + if err != nil { return nil, fmt.Errorf("unmarshaling instructions: %w", err) } @@ -391,6 +671,29 @@ func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { return insns, nil } +// JitedSize returns the size of the program's JIT-compiled machine code in bytes, which is the +// actual code executed on the host's CPU. This field requires the BPF JIT compiler to be enabled. +// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. +func (pi *ProgramInfo) JitedSize() (uint32, error) { + if pi.jitedSize == 0 { + return 0, fmt.Errorf("insufficient permissions, unsupported kernel, or JIT compiler disabled: %w", ErrNotSupported) + } + return pi.jitedSize, nil +} + +// TranslatedSize returns the size of the program's translated instructions in bytes, after it has +// been verified and rewritten by the kernel. +// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. +func (pi *ProgramInfo) TranslatedSize() (int, error) { + insns := len(pi.insns) + if insns == 0 { + return 0, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + return insns, nil +} + // MapIDs returns the maps related to the program. // // Available from 4.15. @@ -400,7 +703,120 @@ func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { return pi.maps, pi.maps != nil } +// LoadTime returns when the program was loaded since boot time. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) LoadTime() (time.Duration, bool) { + // loadTime and NrMapIds were introduced in the same kernel version. + return pi.loadTime, pi.loadTime > 0 +} + +// VerifiedInstructions returns the number verified instructions in the program. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) VerifiedInstructions() (uint32, bool) { + return pi.verifiedInstructions, pi.verifiedInstructions > 0 +} + +// JitedKsymAddrs returns the ksym addresses of the BPF program, including its +// subprograms. The addresses correspond to their symbols in /proc/kallsyms. +// +// Available from 4.18. Note that before 5.x, this field can be empty for +// programs without subprograms (bpf2bpf calls). +// +// The bool return value indicates whether this optional field is available. +// +// When a kernel address can't fit into uintptr (which is usually the case when +// running 32 bit program on a 64 bit kernel), this returns an empty slice and +// a false. +func (pi *ProgramInfo) JitedKsymAddrs() ([]uintptr, bool) { + ksyms := make([]uintptr, 0, len(pi.jitedInfo.ksyms)) + if cap(ksyms) == 0 { + return ksyms, false + } + // Check if a kernel address fits into uintptr (it might not when + // using a 32 bit binary on a 64 bit kernel). This check should work + // with any kernel address, since they have 1s at the highest bits. + if a := pi.jitedInfo.ksyms[0]; uint64(uintptr(a)) != a { + return nil, false + } + for _, ksym := range pi.jitedInfo.ksyms { + ksyms = append(ksyms, uintptr(ksym)) + } + return ksyms, true +} + +// JitedInsns returns the JITed machine native instructions of the program. 
+// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedInsns() ([]byte, bool) { + return pi.jitedInfo.insns, len(pi.jitedInfo.insns) > 0 +} + +// JitedLineInfos returns the JITed line infos of the program. +// +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedLineInfos() ([]uint64, bool) { + return pi.jitedInfo.lineInfos, len(pi.jitedInfo.lineInfos) > 0 +} + +// JitedFuncLens returns the insns length of each function in the JITed program. +// +// Available from 4.18. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedFuncLens() ([]uint32, bool) { + return pi.jitedInfo.funcLens, len(pi.jitedInfo.funcLens) > 0 +} + +// FuncInfos returns the offset and function information of all (sub)programs in +// a BPF program. +// +// Available from 5.0. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. +func (pi *ProgramInfo) FuncInfos() (btf.FuncOffsets, error) { + if len(pi.funcInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + spec, err := pi.btfSpec() + if err != nil { + return nil, err + } + + return btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) +} + +// ProgramInfo returns an approximate number of bytes allocated to this program. +// +// Available from 4.10. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) Memlock() (uint64, bool) { + return pi.memlock, pi.memlock > 0 +} + func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + if platform.IsWindows { + return fmt.Errorf("read fdinfo: %w", internal.ErrNotSupportedOnOS) + } + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) if err != nil { return err @@ -413,54 +829,73 @@ func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { return nil } -var errMissingFields = errors.New("missing fields") - func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { var ( scanner = bufio.NewScanner(r) scanned int + reader bytes.Reader ) for scanner.Scan() { - parts := strings.SplitN(scanner.Text(), "\t", 2) - if len(parts) != 2 { + key, rest, found := bytes.Cut(scanner.Bytes(), []byte(":")) + if !found { + // Line doesn't contain a colon, skip. continue } - - name := strings.TrimSuffix(parts[0], ":") - field, ok := fields[string(name)] + field, ok := fields[string(key)] if !ok { continue } + // If field already contains a non-zero value, don't overwrite it with fdinfo. + if !zero(field) { + scanned++ + continue + } + + // Cut the \t following the : as well as any potential trailing whitespace. 
+ rest = bytes.TrimSpace(rest) - if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 { - return fmt.Errorf("can't parse field %s: %v", name, err) + reader.Reset(rest) + if n, err := fmt.Fscan(&reader, field); err != nil || n != 1 { + return fmt.Errorf("can't parse field %s: %v", key, err) } scanned++ } if err := scanner.Err(); err != nil { - return err + return fmt.Errorf("scanning fdinfo: %w", err) } if len(fields) > 0 && scanned == 0 { return ErrNotSupported } - if scanned != len(fields) { - return errMissingFields + return nil +} + +func zero(arg any) bool { + v := reflect.ValueOf(arg) + + // Unwrap pointers and interfaces. + for v.Kind() == reflect.Pointer || + v.Kind() == reflect.Interface { + v = v.Elem() } - return nil + return v.IsZero() } -// EnableStats starts the measuring of the runtime -// and run counts of eBPF programs. +// EnableStats starts collecting runtime statistics of eBPF programs, like the +// amount of program executions and the cumulative runtime. // -// Collecting statistics can have an impact on the performance. +// Specify a BPF_STATS_* constant to select which statistics to collect, like +// [unix.BPF_STATS_RUN_TIME]. Closing the returned [io.Closer] will stop +// collecting statistics. // -// Requires at least 5.8. +// Collecting statistics may have a performance impact. +// +// Requires at least Linux 5.8. func EnableStats(which uint32) (io.Closer, error) { fd, err := sys.EnableStats(&sys.EnableStatsAttr{ Type: which, @@ -471,7 +906,12 @@ func EnableStats(which uint32) (io.Closer, error) { return fd, nil } -var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", "4.15", func() error { +var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + prog, err := progLoad(asm.Instructions{ asm.LoadImm(asm.R0, 0, asm.DWord), asm.Return(), @@ -496,4 +936,4 @@ var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", " } return err -}) +}, "4.15", "windows:0.21.0") diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/buffer.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/buffer.go deleted file mode 100644 index 81c6544330f8..000000000000 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/buffer.go +++ /dev/null @@ -1,31 +0,0 @@ -package internal - -import ( - "bytes" - "sync" -) - -var bytesBufferPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -// NewBuffer retrieves a [bytes.Buffer] from a pool an re-initialises it. -// -// The returned buffer should be passed to [PutBuffer]. -func NewBuffer(buf []byte) *bytes.Buffer { - wr := bytesBufferPool.Get().(*bytes.Buffer) - // Reinitialize the Buffer with a new backing slice since it is returned to - // the caller by wr.Bytes() below. Pooling is faster despite calling - // NewBuffer. The pooled alloc is still reused, it only needs to be zeroed. - *wr = *bytes.NewBuffer(buf) - return wr -} - -// PutBuffer releases a buffer to the pool. -func PutBuffer(buf *bytes.Buffer) { - // Release reference to the backing buffer. 
- *buf = *bytes.NewBuffer(nil) - bytesBufferPool.Put(buf) -} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/deque.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/deque.go index e3a30502159b..ed113ddd7d0f 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/deque.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/deque.go @@ -75,10 +75,7 @@ func (dq *Deque[T]) Grow(n int) { // Round up to the new power of two which is at least 8. // See https://jameshfisher.com/2018/03/30/round-up-power-2/ - capacity := 1 << (64 - bits.LeadingZeros64(need-1)) - if capacity < 8 { - capacity = 8 - } + capacity := max(1<<(64-bits.LeadingZeros64(need-1)), 8) elems := make([]T, have, capacity) pivot := dq.read & dq.mask diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/enums.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/enums.go new file mode 100644 index 000000000000..71320b6311e2 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/enums.go @@ -0,0 +1,65 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +Converts an attach type enum into a GUID. + + ebpf_result_t ebpf_get_ebpf_attach_type( + bpf_attach_type_t bpf_attach_type, + _Out_ ebpf_attach_type_t* ebpf_attach_type_t *ebpf_attach_type) +*/ +var ebpfGetEbpfAttachTypeProc = newProc("ebpf_get_ebpf_attach_type") + +func EbpfGetEbpfAttachType(attachType uint32) (windows.GUID, error) { + addr, err := ebpfGetEbpfAttachTypeProc.Find() + if err != nil { + return windows.GUID{}, err + } + + var attachTypeGUID windows.GUID + err = errorResult(syscall.SyscallN(addr, + uintptr(attachType), + uintptr(unsafe.Pointer(&attachTypeGUID)), + )) + return attachTypeGUID, err +} + +/* +Retrieve a program type given a GUID. + + bpf_prog_type_t ebpf_get_bpf_program_type(_In_ const ebpf_program_type_t* program_type) +*/ +var ebpfGetBpfProgramTypeProc = newProc("ebpf_get_bpf_program_type") + +func EbpfGetBpfProgramType(programType windows.GUID) (uint32, error) { + addr, err := ebpfGetBpfProgramTypeProc.Find() + if err != nil { + return 0, err + } + + return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&programType)))), nil +} + +/* +Retrieve an attach type given a GUID. 
+ + bpf_attach_type_t ebpf_get_bpf_attach_type(_In_ const ebpf_attach_type_t* ebpf_attach_type) +*/ +var ebpfGetBpfAttachTypeProc = newProc("ebpf_get_bpf_attach_type") + +func EbpfGetBpfAttachType(attachType windows.GUID) (uint32, error) { + addr, err := ebpfGetBpfAttachTypeProc.Find() + if err != nil { + return 0, err + } + + return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&attachType)))), nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go new file mode 100644 index 000000000000..83b9a265ee26 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go @@ -0,0 +1,155 @@ +//go:build windows + +package efw + +import ( + "errors" + "fmt" + "os" + "syscall" + "testing" + + "golang.org/x/sys/windows" +) + +func init() { + if !testing.Testing() { + return + } + + if isDebuggerPresent() { + return + } + + if err := configureCRTErrorReporting(); err != nil { + fmt.Fprintln(os.Stderr, "WARNING: Could not configure CRT error reporting, tests may hang:", err) + } +} + +var errErrorReportingAlreadyConfigured = errors.New("error reporting already configured") + +// Configure built-in error reporting of the C runtime library. +// +// The C runtime emits assertion failures into a graphical message box by default. +// This causes a hang in CI environments. This function configures the CRT to +// log to stderr instead. +func configureCRTErrorReporting() error { + const ucrtDebug = "ucrtbased.dll" + + // Constants from crtdbg.h + // + // See https://doxygen.reactos.org/da/d40/crt_2crtdbg_8h_source.html + const ( + _CRT_ERROR = 1 + _CRT_ASSERT = 2 + _CRTDBG_MODE_FILE = 0x1 + _CRTDBG_MODE_WNDW = 0x4 + _CRTDBG_HFILE_ERROR = -2 + _CRTDBG_FILE_STDERR = -4 + ) + + // Load the efW API to trigger loading the CRT. This may fail, in which case + // we can't figure out which CRT is being used. + // In that case we rely on the error bubbling up via some other path. + _ = module.Load() + + ucrtHandle, err := syscall.UTF16PtrFromString(ucrtDebug) + if err != nil { + return err + } + + var handle windows.Handle + err = windows.GetModuleHandleEx(0, ucrtHandle, &handle) + if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) { + // Loading the ebpf api did not pull in the debug UCRT, so there is + // nothing to configure. 
+ return nil + } else if err != nil { + return err + } + defer windows.FreeLibrary(handle) + + setReportModeAddr, err := windows.GetProcAddress(handle, "_CrtSetReportMode") + if err != nil { + return err + } + + setReportMode := func(reportType int, reportMode int) (int, error) { + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170 + r1, _, err := syscall.SyscallN(setReportModeAddr, uintptr(reportType), uintptr(reportMode)) + if int(r1) == -1 { + return 0, fmt.Errorf("set report mode for type %d: %w", reportType, err) + } + return int(r1), nil + } + + setReportFileAddr, err := windows.GetProcAddress(handle, "_CrtSetReportFile") + if err != nil { + return err + } + + setReportFile := func(reportType int, reportFile int) (int, error) { + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportfile?view=msvc-170 + r1, _, err := syscall.SyscallN(setReportFileAddr, uintptr(reportType), uintptr(reportFile)) + if int(r1) == _CRTDBG_HFILE_ERROR { + return 0, fmt.Errorf("set report file for type %d: %w", reportType, err) + } + return int(r1), nil + } + + reportToFile := func(reportType, defaultMode int) error { + oldMode, err := setReportMode(reportType, _CRTDBG_MODE_FILE) + if err != nil { + return err + } + + if oldMode != defaultMode { + // Attempt to restore old mode if it was different from the expected default. + _, _ = setReportMode(reportType, oldMode) + return errErrorReportingAlreadyConfigured + } + + oldFile, err := setReportFile(reportType, _CRTDBG_FILE_STDERR) + if err != nil { + return err + } + + if oldFile != -1 { + // Attempt to restore old file if it was different from the expected default. + _, _ = setReportFile(reportType, oldFile) + return errErrorReportingAlreadyConfigured + } + + return nil + } + + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170#remarks + // for defaults. + if err := reportToFile(_CRT_ASSERT, _CRTDBG_MODE_WNDW); err != nil { + return err + } + + if err := reportToFile(_CRT_ERROR, _CRTDBG_MODE_WNDW); err != nil { + return err + } + + return nil +} + +// isDebuggerPresent returns true if the current process is being debugged. 
+// +// See https://learn.microsoft.com/en-us/windows/win32/api/debugapi/nf-debugapi-isdebuggerpresent +func isDebuggerPresent() bool { + kernel32Handle, err := windows.LoadLibrary("kernel32.dll") + if err != nil { + return false + } + + isDebuggerPresentAddr, err := windows.GetProcAddress(kernel32Handle, "IsDebuggerPresent") + if err != nil { + return false + } + + r1, _, _ := syscall.SyscallN(isDebuggerPresentAddr) + return r1 != 0 +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/fd.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/fd.go new file mode 100644 index 000000000000..b0d0bcdd4f44 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/fd.go @@ -0,0 +1,34 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" +) + +// ebpf_result_t ebpf_close_fd(fd_t fd) +var ebpfCloseFdProc = newProc("ebpf_close_fd") + +func EbpfCloseFd(fd int) error { + addr, err := ebpfCloseFdProc.Find() + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, uintptr(fd))) +} + +// ebpf_result_t ebpf_duplicate_fd(fd_t fd, _Out_ fd_t* dup) +var ebpfDuplicateFdProc = newProc("ebpf_duplicate_fd") + +func EbpfDuplicateFd(fd int) (int, error) { + addr, err := ebpfDuplicateFdProc.Find() + if err != nil { + return -1, err + } + + var dup FD + err = errorResult(syscall.SyscallN(addr, uintptr(fd), uintptr(unsafe.Pointer(&dup)))) + return int(dup), err +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/module.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/module.go new file mode 100644 index 000000000000..606d83930a98 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/module.go @@ -0,0 +1,36 @@ +//go:build windows + +// Package efw contains support code for eBPF for Windows. +package efw + +import ( + "golang.org/x/sys/windows" +) + +// module is the global handle for the eBPF for Windows user-space API. +var module = windows.NewLazyDLL("ebpfapi.dll") + +// FD is the equivalent of fd_t. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/54632eb360c560ebef2f173be1a4a4625d540744/include/ebpf_api.h#L24 +type FD int32 + +// Size is the equivalent of size_t. +// +// This is correct on amd64 and arm64 according to tests on godbolt.org. +type Size uint64 + +// Int is the equivalent of int on MSVC (am64, arm64) and MinGW (gcc, clang). +type Int int32 + +// ObjectType is the equivalent of ebpf_object_type_t. 
+// +// See https://github.com/microsoft/ebpf-for-windows/blob/44f5de09ec0f3f7ad176c00a290c1cb7106cdd5e/include/ebpf_core_structs.h#L41 +type ObjectType uint32 + +const ( + EBPF_OBJECT_UNKNOWN ObjectType = iota + EBPF_OBJECT_MAP + EBPF_OBJECT_LINK + EBPF_OBJECT_PROGRAM +) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/native.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/native.go new file mode 100644 index 000000000000..04f796abbfef --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/native.go @@ -0,0 +1,44 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +ebpf_result_t ebpf_object_load_native_by_fds( + + _In_z_ const char* file_name, + _Inout_ size_t* count_of_maps, + _Out_writes_opt_(count_of_maps) fd_t* map_fds, + _Inout_ size_t* count_of_programs, + _Out_writes_opt_(count_of_programs) fd_t* program_fds) +*/ +var ebpfObjectLoadNativeByFdsProc = newProc("ebpf_object_load_native_by_fds") + +func EbpfObjectLoadNativeFds(fileName string, mapFds []FD, programFds []FD) (int, int, error) { + addr, err := ebpfObjectLoadNativeByFdsProc.Find() + if err != nil { + return 0, 0, err + } + + fileBytes, err := windows.ByteSliceFromString(fileName) + if err != nil { + return 0, 0, err + } + + countOfMaps := Size(len(mapFds)) + countOfPrograms := Size(len(programFds)) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(&fileBytes[0])), + uintptr(unsafe.Pointer(&countOfMaps)), + uintptr(unsafe.Pointer(&mapFds[0])), + uintptr(unsafe.Pointer(&countOfPrograms)), + uintptr(unsafe.Pointer(&programFds[0])), + )) + return int(countOfMaps), int(countOfPrograms), err +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/object.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/object.go new file mode 100644 index 000000000000..560e2f09b3ec --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/object.go @@ -0,0 +1,117 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// https://github.com/microsoft/ebpf-for-windows/blob/9d9003c39c3fd75be5225ac0fce30077d6bf0604/include/ebpf_core_structs.h#L15 +const _EBPF_MAX_PIN_PATH_LENGTH = 256 + +/* +Retrieve object info and type from a fd. 
+ + ebpf_result_t ebpf_object_get_info_by_fd( + fd_t bpf_fd, + _Inout_updates_bytes_to_opt_(*info_size, *info_size) void* info, + _Inout_opt_ uint32_t* info_size, + _Out_opt_ ebpf_object_type_t* type) +*/ +var ebpfObjectGetInfoByFdProc = newProc("ebpf_object_get_info_by_fd") + +func EbpfObjectGetInfoByFd(fd int, info unsafe.Pointer, info_size *uint32) (ObjectType, error) { + addr, err := ebpfObjectGetInfoByFdProc.Find() + if err != nil { + return 0, err + } + + var objectType ObjectType + err = errorResult(syscall.SyscallN(addr, + uintptr(fd), + uintptr(info), + uintptr(unsafe.Pointer(info_size)), + uintptr(unsafe.Pointer(&objectType)), + )) + return objectType, err +} + +// ebpf_result_t ebpf_object_unpin(_In_z_ const char* path) +var ebpfObjectUnpinProc = newProc("ebpf_object_unpin") + +func EbpfObjectUnpin(path string) error { + addr, err := ebpfObjectUnpinProc.Find() + if err != nil { + return err + } + + pathBytes, err := windows.ByteSliceFromString(path) + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&pathBytes[0])))) +} + +/* +Retrieve the next pinned object path. + + ebpf_result_t ebpf_get_next_pinned_object_path( + _In_opt_z_ const char* start_path, + _Out_writes_z_(next_path_len) char* next_path, + size_t next_path_len, + _Inout_opt_ ebpf_object_type_t* type) +*/ +var ebpfGetNextPinnedObjectPath = newProc("ebpf_get_next_pinned_object_path") + +func EbpfGetNextPinnedObjectPath(startPath string, objectType ObjectType) (string, ObjectType, error) { + addr, err := ebpfGetNextPinnedObjectPath.Find() + if err != nil { + return "", 0, err + } + + ptr, err := windows.BytePtrFromString(startPath) + if err != nil { + return "", 0, err + } + + tmp := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(ptr)), + uintptr(unsafe.Pointer(&tmp[0])), + uintptr(len(tmp)), + uintptr(unsafe.Pointer(&objectType)), + )) + return windows.ByteSliceToString(tmp), objectType, err +} + +/* +Canonicalize a path using filesystem canonicalization rules. + + _Must_inspect_result_ ebpf_result_t + ebpf_canonicalize_pin_path(_Out_writes_(output_size) char* output, size_t output_size, _In_z_ const char* input) +*/ +var ebpfCanonicalizePinPath = newProc("ebpf_canonicalize_pin_path") + +func EbpfCanonicalizePinPath(input string) (string, error) { + addr, err := ebpfCanonicalizePinPath.Find() + if err != nil { + return "", err + } + + inputBytes, err := windows.ByteSliceFromString(input) + if err != nil { + return "", err + } + + output := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(&output[0])), + uintptr(len(output)), + uintptr(unsafe.Pointer(&inputBytes[0])), + )) + return windows.ByteSliceToString(output), err +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/proc.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/proc.go new file mode 100644 index 000000000000..81329905fd85 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/proc.go @@ -0,0 +1,50 @@ +//go:build windows + +package efw + +import ( + "errors" + "fmt" + "syscall" + + "golang.org/x/sys/windows" +) + +/* +The BPF syscall wrapper which is ABI compatible with Linux. 
+ + int bpf(int cmd, union bpf_attr* attr, unsigned int size) +*/ +var BPF = newProc("bpf") + +type proc struct { + proc *windows.LazyProc +} + +func newProc(name string) proc { + return proc{module.NewProc(name)} +} + +func (p proc) Find() (uintptr, error) { + if err := p.proc.Find(); err != nil { + if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) { + return 0, fmt.Errorf("load %s: not found", module.Name) + } + return 0, err + } + return p.proc.Addr(), nil +} + +// uint32Result wraps a function which returns a uint32_t. +func uint32Result(r1, _ uintptr, _ syscall.Errno) uint32 { + return uint32(r1) +} + +// errorResult wraps a function which returns ebpf_result_t. +func errorResult(r1, _ uintptr, errNo syscall.Errno) error { + err := resultToError(Result(r1)) + if err != nil && errNo != 0 { + return fmt.Errorf("%w (errno: %v)", err, errNo) + } + return err +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/program.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/program.go new file mode 100644 index 000000000000..6202acf32c32 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/program.go @@ -0,0 +1,39 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +Attach a program. + + ebpf_result_t ebpf_program_attach_by_fds( + fd_t program_fd, + _In_opt_ const ebpf_attach_type_t* attach_type, + _In_reads_bytes_opt_(attach_parameters_size) void* attach_parameters, + size_t attach_parameters_size, + _Out_ fd_t* link) +*/ +var ebpfProgramAttachByFdsProc = newProc("ebpf_program_attach_by_fds") + +func EbpfProgramAttachFds(fd int, attachType windows.GUID, params unsafe.Pointer, params_size uintptr) (int, error) { + addr, err := ebpfProgramAttachByFdsProc.Find() + if err != nil { + return 0, err + } + + var link FD + err = errorResult(syscall.SyscallN(addr, + uintptr(fd), + uintptr(unsafe.Pointer(&attachType)), + uintptr(params), + params_size, + uintptr(unsafe.Pointer(&link)), + )) + return int(link), err +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/result.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/result.go new file mode 100644 index 000000000000..3275941d3e6f --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/result.go @@ -0,0 +1,57 @@ +//go:build windows + +package efw + +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_result.h +type Result int32 + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -tags windows -output result_string_windows.go -type=Result + +const ( + EBPF_SUCCESS Result = iota + EBPF_VERIFICATION_FAILED + EBPF_JIT_COMPILATION_FAILED + EBPF_PROGRAM_LOAD_FAILED + EBPF_INVALID_FD + EBPF_INVALID_OBJECT + EBPF_INVALID_ARGUMENT + EBPF_OBJECT_NOT_FOUND + EBPF_OBJECT_ALREADY_EXISTS + EBPF_FILE_NOT_FOUND + EBPF_ALREADY_PINNED + EBPF_NOT_PINNED + EBPF_NO_MEMORY + EBPF_PROGRAM_TOO_LARGE + EBPF_RPC_EXCEPTION + EBPF_ALREADY_INITIALIZED + EBPF_ELF_PARSING_FAILED + EBPF_FAILED + EBPF_OPERATION_NOT_SUPPORTED + EBPF_KEY_NOT_FOUND + EBPF_ACCESS_DENIED + EBPF_BLOCKED_BY_POLICY + EBPF_ARITHMETIC_OVERFLOW + EBPF_EXTENSION_FAILED_TO_LOAD + EBPF_INSUFFICIENT_BUFFER + EBPF_NO_MORE_KEYS + EBPF_KEY_ALREADY_EXISTS + EBPF_NO_MORE_TAIL_CALLS + EBPF_PENDING + EBPF_OUT_OF_SPACE + EBPF_CANCELED + EBPF_INVALID_POINTER + EBPF_TIMEOUT + EBPF_STALE_ID + EBPF_INVALID_STATE +) + +func (r Result) 
Error() string { + return r.String() +} + +func resultToError(res Result) error { + if res == EBPF_SUCCESS { + return nil + } + return res +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go new file mode 100644 index 000000000000..1e55b5186531 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go @@ -0,0 +1,57 @@ +// Code generated by "stringer -tags windows -output result_string_windows.go -type=Result"; DO NOT EDIT. + +package efw + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[EBPF_SUCCESS-0] + _ = x[EBPF_VERIFICATION_FAILED-1] + _ = x[EBPF_JIT_COMPILATION_FAILED-2] + _ = x[EBPF_PROGRAM_LOAD_FAILED-3] + _ = x[EBPF_INVALID_FD-4] + _ = x[EBPF_INVALID_OBJECT-5] + _ = x[EBPF_INVALID_ARGUMENT-6] + _ = x[EBPF_OBJECT_NOT_FOUND-7] + _ = x[EBPF_OBJECT_ALREADY_EXISTS-8] + _ = x[EBPF_FILE_NOT_FOUND-9] + _ = x[EBPF_ALREADY_PINNED-10] + _ = x[EBPF_NOT_PINNED-11] + _ = x[EBPF_NO_MEMORY-12] + _ = x[EBPF_PROGRAM_TOO_LARGE-13] + _ = x[EBPF_RPC_EXCEPTION-14] + _ = x[EBPF_ALREADY_INITIALIZED-15] + _ = x[EBPF_ELF_PARSING_FAILED-16] + _ = x[EBPF_FAILED-17] + _ = x[EBPF_OPERATION_NOT_SUPPORTED-18] + _ = x[EBPF_KEY_NOT_FOUND-19] + _ = x[EBPF_ACCESS_DENIED-20] + _ = x[EBPF_BLOCKED_BY_POLICY-21] + _ = x[EBPF_ARITHMETIC_OVERFLOW-22] + _ = x[EBPF_EXTENSION_FAILED_TO_LOAD-23] + _ = x[EBPF_INSUFFICIENT_BUFFER-24] + _ = x[EBPF_NO_MORE_KEYS-25] + _ = x[EBPF_KEY_ALREADY_EXISTS-26] + _ = x[EBPF_NO_MORE_TAIL_CALLS-27] + _ = x[EBPF_PENDING-28] + _ = x[EBPF_OUT_OF_SPACE-29] + _ = x[EBPF_CANCELED-30] + _ = x[EBPF_INVALID_POINTER-31] + _ = x[EBPF_TIMEOUT-32] + _ = x[EBPF_STALE_ID-33] + _ = x[EBPF_INVALID_STATE-34] +} + +const _Result_name = "EBPF_SUCCESSEBPF_VERIFICATION_FAILEDEBPF_JIT_COMPILATION_FAILEDEBPF_PROGRAM_LOAD_FAILEDEBPF_INVALID_FDEBPF_INVALID_OBJECTEBPF_INVALID_ARGUMENTEBPF_OBJECT_NOT_FOUNDEBPF_OBJECT_ALREADY_EXISTSEBPF_FILE_NOT_FOUNDEBPF_ALREADY_PINNEDEBPF_NOT_PINNEDEBPF_NO_MEMORYEBPF_PROGRAM_TOO_LARGEEBPF_RPC_EXCEPTIONEBPF_ALREADY_INITIALIZEDEBPF_ELF_PARSING_FAILEDEBPF_FAILEDEBPF_OPERATION_NOT_SUPPORTEDEBPF_KEY_NOT_FOUNDEBPF_ACCESS_DENIEDEBPF_BLOCKED_BY_POLICYEBPF_ARITHMETIC_OVERFLOWEBPF_EXTENSION_FAILED_TO_LOADEBPF_INSUFFICIENT_BUFFEREBPF_NO_MORE_KEYSEBPF_KEY_ALREADY_EXISTSEBPF_NO_MORE_TAIL_CALLSEBPF_PENDINGEBPF_OUT_OF_SPACEEBPF_CANCELEDEBPF_INVALID_POINTEREBPF_TIMEOUTEBPF_STALE_IDEBPF_INVALID_STATE" + +var _Result_index = [...]uint16{0, 12, 36, 63, 87, 102, 121, 142, 163, 189, 208, 227, 242, 256, 278, 296, 320, 343, 354, 382, 400, 418, 440, 464, 493, 517, 534, 557, 580, 592, 609, 622, 642, 654, 667, 685} + +func (i Result) String() string { + if i < 0 || i >= Result(len(_Result_index)-1) { + return "Result(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Result_name[_Result_index[i]:_Result_index[i+1]] +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/structs.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/structs.go new file mode 100644 index 000000000000..558dbb86537f --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/efw/structs.go @@ -0,0 +1,36 @@ +//go:build windows + +package efw + +import "golang.org/x/sys/windows" + +// 
https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L366 +const _BPF_OBJ_NAME_LEN = 64 + +// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L372-L386 +type BpfMapInfo struct { + _ uint32 ///< Map ID. + _ uint32 ///< Type of map. + _ uint32 ///< Size in bytes of a map key. + _ uint32 ///< Size in bytes of a map value. + _ uint32 ///< Maximum number of entries allowed in the map. + Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name. + _ uint32 ///< Map flags. + + _ uint32 ///< ID of inner map template. + _ uint32 ///< Number of pinned paths. +} + +// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L396-L410 +type BpfProgInfo struct { + _ uint32 ///< Program ID. + _ uint32 ///< Program type, if a cross-platform type. + _ uint32 ///< Number of maps associated with this program. + _ uintptr ///< Pointer to caller-allocated array to fill map IDs into. + Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name. + + _ windows.GUID ///< Program type UUID. + _ windows.GUID ///< Attach type UUID. + _ uint32 ///< Number of pinned paths. + _ uint32 ///< Number of attached links. +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/endian_le.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/endian_le.go index 6dcd916d5df2..d833ea764f72 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/endian_le.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -1,4 +1,4 @@ -//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 +//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 || wasm package internal diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/errors.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/errors.go index 83a371ad35d6..19d5294ca042 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/errors.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/errors.go @@ -23,7 +23,7 @@ func ErrorWithLog(source string, err error, log []byte) *VerifierError { log = bytes.Trim(log, whitespace) if len(log) == 0 { - return &VerifierError{source, err, nil, false} + return &VerifierError{source, err, nil} } logLines := bytes.Split(log, []byte{'\n'}) @@ -34,7 +34,7 @@ func ErrorWithLog(source string, err error, log []byte) *VerifierError { lines = append(lines, string(bytes.TrimRight(line, whitespace))) } - return &VerifierError{source, err, lines, false} + return &VerifierError{source, err, lines} } // VerifierError includes information from the eBPF verifier. @@ -46,8 +46,6 @@ type VerifierError struct { Cause error // The verifier output split into lines. Log []string - // Deprecated: the log is never truncated anymore. 
- Truncated bool } func (le *VerifierError) Unwrap() error { diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/feature.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/feature.go index 2b856c735e7f..82b8d93956ae 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/feature.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/feature.go @@ -3,15 +3,26 @@ package internal import ( "errors" "fmt" + "runtime" "sync" + + "github.com/cilium/ebpf/internal/platform" ) -// ErrNotSupported indicates that a feature is not supported by the current kernel. +// ErrNotSupported indicates that a feature is not supported. var ErrNotSupported = errors.New("not supported") +// ErrNotSupportedOnOS indicates that a feature is not supported on the current +// operating system. +var ErrNotSupportedOnOS = fmt.Errorf("%w on %s", ErrNotSupported, runtime.GOOS) + // UnsupportedFeatureError is returned by FeatureTest() functions. type UnsupportedFeatureError struct { - // The minimum Linux mainline version required for this feature. + // The minimum version required for this feature. + // + // On Linux this refers to the mainline kernel version, on other platforms + // to the version of the runtime. + // // Used for the error string, and for sanity checking during testing. MinimumVersion Version @@ -58,7 +69,25 @@ type FeatureTest struct { type FeatureTestFn func() error // NewFeatureTest is a convenient way to create a single [FeatureTest]. -func NewFeatureTest(name, version string, fn FeatureTestFn) func() error { +// +// versions specifies in which version of a BPF runtime a feature appeared. +// The format is "GOOS:Major.Minor[.Patch]". GOOS may be omitted when targeting +// Linux. Returns [ErrNotSupportedOnOS] if there is no version specified for the +// current OS. +func NewFeatureTest(name string, fn FeatureTestFn, versions ...string) func() error { + version, err := platform.SelectVersion(versions) + if err != nil { + return func() error { return err } + } + + if version == "" { + return func() error { + // We don't return an UnsupportedFeatureError here, since that will + // trigger version checks which don't make sense. + return fmt.Errorf("%s: %w", name, ErrNotSupportedOnOS) + } + } + ft := &FeatureTest{ Name: name, Version: version, @@ -132,12 +161,18 @@ type FeatureMatrix[K comparable] map[K]*FeatureTest // Result returns the outcome of the feature test for the given key. // // It's safe to call this function concurrently. +// +// Always returns [ErrNotSupportedOnOS] on Windows. func (fm FeatureMatrix[K]) Result(key K) error { ft, ok := fm[key] if !ok { return fmt.Errorf("no feature probe for %v", key) } + if platform.IsWindows { + return fmt.Errorf("%s: %w", ft.Name, ErrNotSupportedOnOS) + } + return ft.execute() } @@ -158,6 +193,10 @@ func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K } func (fc *FeatureCache[K]) Result(key K) error { + if platform.IsWindows { + return fmt.Errorf("feature probe for %v: %w", key, ErrNotSupportedOnOS) + } + // NB: Executing the feature test happens without fc.mu taken. 
return fc.retrieve(key).execute() } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go new file mode 100644 index 000000000000..b7f3e0b78198 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go @@ -0,0 +1,20 @@ +package kallsyms + +import "sync" + +type cache[K, V comparable] struct { + m sync.Map +} + +func (c *cache[K, V]) Load(key K) (value V, _ bool) { + v, ok := c.m.Load(key) + if !ok { + return value, false + } + value = v.(V) + return value, true +} + +func (c *cache[K, V]) Store(key K, value V) { + c.m.Store(key, value) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go index 776c7a10a28e..9154a8a79459 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go @@ -1,74 +1,293 @@ package kallsyms import ( - "bufio" "bytes" + "errors" + "fmt" "io" "os" - "sync" + "slices" + "strconv" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" ) -var kernelModules struct { - sync.RWMutex - // function to kernel module mapping - kmods map[string]string +var errAmbiguousKsym = errors.New("multiple kernel symbols with the same name") + +var symAddrs cache[string, uint64] +var symModules cache[string, string] + +// Module returns the kernel module providing the given symbol in the kernel, if +// any. Returns an empty string and no error if the symbol is not present in the +// kernel. Only function symbols are considered. Returns an error if multiple +// symbols with the same name were found. +// +// Consider [AssignModules] if you need to resolve multiple symbols, as it will +// only perform one iteration over /proc/kallsyms. +func Module(name string) (string, error) { + if name == "" { + return "", nil + } + + if mod, ok := symModules.Load(name); ok { + return mod, nil + } + + request := map[string]string{name: ""} + if err := AssignModules(request); err != nil { + return "", err + } + + return request[name], nil } -// KernelModule returns the kernel module, if any, a probe-able function is contained in. -func KernelModule(fn string) (string, error) { - kernelModules.RLock() - kmods := kernelModules.kmods - kernelModules.RUnlock() +// AssignModules looks up the kernel module providing each given symbol, if any, +// and assigns them to their corresponding values in the symbols map. Only +// function symbols are considered. Results of all lookups are cached, +// successful or otherwise. +// +// Any symbols missing in the kernel are ignored. Returns an error if multiple +// symbols with a given name were found. +func AssignModules(symbols map[string]string) error { + if !platform.IsLinux { + return fmt.Errorf("read /proc/kallsyms: %w", internal.ErrNotSupportedOnOS) + } - if kmods == nil { - kernelModules.Lock() - defer kernelModules.Unlock() - kmods = kernelModules.kmods + if len(symbols) == 0 { + return nil } - if kmods != nil { - return kmods[fn], nil + // Attempt to fetch symbols from cache. 
+ request := make(map[string]string) + for name := range symbols { + if mod, ok := symModules.Load(name); ok { + symbols[name] = mod + continue + } + + // Mark the symbol to be read from /proc/kallsyms. + request[name] = "" + } + if len(request) == 0 { + // All symbols satisfied from cache. + return nil } f, err := os.Open("/proc/kallsyms") if err != nil { - return "", err + return err } defer f.Close() - kmods, err = loadKernelModuleMapping(f) - if err != nil { - return "", err + + if err := assignModules(f, request); err != nil { + return fmt.Errorf("assigning symbol modules: %w", err) } - kernelModules.kmods = kmods - return kmods[fn], nil + // Update the cache with the new symbols. Cache all requested symbols, even if + // they're missing or don't belong to a module. + for name, mod := range request { + symModules.Store(name, mod) + symbols[name] = mod + } + + return nil } -// FlushKernelModuleCache removes any cached information about function to kernel module mapping. -func FlushKernelModuleCache() { - kernelModules.Lock() - defer kernelModules.Unlock() +// assignModules assigns kernel symbol modules read from f to values requested +// by symbols. Always scans the whole input to make sure the user didn't request +// an ambiguous symbol. +func assignModules(f io.Reader, symbols map[string]string) error { + if len(symbols) == 0 { + return nil + } + + found := make(map[string]struct{}) + r := newReader(f) + for r.Line() { + // Only look for function symbols in the kernel's text section (tT). + s, err, skip := parseSymbol(r, []rune{'t', 'T'}) + if err != nil { + return fmt.Errorf("parsing kallsyms line: %w", err) + } + if skip { + continue + } + + if _, requested := symbols[string(s.name)]; !requested { + continue + } + + if _, ok := found[string(s.name)]; ok { + // We've already seen this symbol. Return an error to avoid silently + // attaching to a symbol in the wrong module. libbpf also rejects + // referring to ambiguous symbols. + // + // We can't simply check if we already have a value for the given symbol, + // since many won't have an associated kernel module. + return fmt.Errorf("symbol %s: duplicate found at address 0x%x (module %q): %w", + s.name, s.addr, s.mod, errAmbiguousKsym) + } + + symbols[string(s.name)] = string(s.mod) + found[string(s.name)] = struct{}{} + } + if err := r.Err(); err != nil { + return fmt.Errorf("reading kallsyms: %w", err) + } + + return nil +} + +// Address returns the address of the given symbol in the kernel. Returns 0 and +// no error if the symbol is not present. Returns an error if multiple addresses +// were found for a symbol. +// +// Consider [AssignAddresses] if you need to resolve multiple symbols, as it +// will only perform one iteration over /proc/kallsyms. +func Address(symbol string) (uint64, error) { + if symbol == "" { + return 0, nil + } + + if addr, ok := symAddrs.Load(symbol); ok { + return addr, nil + } - kernelModules.kmods = nil + request := map[string]uint64{symbol: 0} + if err := AssignAddresses(request); err != nil { + return 0, err + } + + return request[symbol], nil } -func loadKernelModuleMapping(f io.Reader) (map[string]string, error) { - mods := make(map[string]string) - scanner := bufio.NewScanner(f) - for scanner.Scan() { - fields := bytes.Fields(scanner.Bytes()) - if len(fields) < 4 { +// AssignAddresses looks up the addresses of the requested symbols in the kernel +// and assigns them to their corresponding values in the symbols map. Results +// of all lookups are cached, successful or otherwise. 
+// +// Any symbols missing in the kernel are ignored. Returns an error if multiple +// addresses were found for a symbol. +func AssignAddresses(symbols map[string]uint64) error { + if !platform.IsLinux { + return fmt.Errorf("read /proc/kallsyms: %w", internal.ErrNotSupportedOnOS) + } + + if len(symbols) == 0 { + return nil + } + + // Attempt to fetch symbols from cache. + request := make(map[string]uint64) + for name := range symbols { + if addr, ok := symAddrs.Load(name); ok { + symbols[name] = addr continue } - switch string(fields[1]) { - case "t", "T": - mods[string(fields[2])] = string(bytes.Trim(fields[3], "[]")) - default: + + // Mark the symbol to be read from /proc/kallsyms. + request[name] = 0 + } + if len(request) == 0 { + // All symbols satisfied from cache. + return nil + } + + f, err := os.Open("/proc/kallsyms") + if err != nil { + return err + } + defer f.Close() + + if err := assignAddresses(f, request); err != nil { + return fmt.Errorf("loading symbol addresses: %w", err) + } + + // Update the cache with the new symbols. Cache all requested symbols even if + // they weren't found, to avoid repeated lookups. + for name, addr := range request { + symAddrs.Store(name, addr) + symbols[name] = addr + } + + return nil +} + +// assignAddresses assigns kernel symbol addresses read from f to values +// requested by symbols. Always scans the whole input to make sure the user +// didn't request an ambiguous symbol. +func assignAddresses(f io.Reader, symbols map[string]uint64) error { + if len(symbols) == 0 { + return nil + } + r := newReader(f) + for r.Line() { + s, err, skip := parseSymbol(r, nil) + if err != nil { + return fmt.Errorf("parsing kallsyms line: %w", err) + } + if skip { continue } + + existing, requested := symbols[string(s.name)] + if existing != 0 { + // Multiple addresses for a symbol have been found. Return a friendly + // error to avoid silently attaching to the wrong symbol. libbpf also + // rejects referring to ambiguous symbols. + return fmt.Errorf("symbol %s(0x%x): duplicate found at address 0x%x: %w", s.name, existing, s.addr, errAmbiguousKsym) + } + if requested { + symbols[string(s.name)] = s.addr + } } - if scanner.Err() != nil { - return nil, scanner.Err() + if err := r.Err(); err != nil { + return fmt.Errorf("reading kallsyms: %w", err) } - return mods, nil + + return nil +} + +type ksym struct { + addr uint64 + name []byte + mod []byte +} + +// parseSymbol parses a line from /proc/kallsyms into an address, type, name and +// module. Skip will be true if the symbol doesn't match any of the given symbol +// types. See `man 1 nm` for all available types. +// +// Only yields symbols whose type is contained in types. An empty value for types +// disables this filtering. +// +// Example line: `ffffffffc1682010 T nf_nat_init\t[nf_nat]` +func parseSymbol(r *reader, types []rune) (s ksym, err error, skip bool) { + for i := 0; r.Word(); i++ { + switch i { + // Address of the symbol. + case 0: + s.addr, err = strconv.ParseUint(r.Text(), 16, 64) + if err != nil { + return s, fmt.Errorf("parsing address: %w", err), false + } + // Type of the symbol. Assume the character is ASCII-encoded by converting + // it directly to a rune, since it's a fixed field controlled by the kernel. + case 1: + if len(types) > 0 && !slices.Contains(types, rune(r.Bytes()[0])) { + return s, nil, true + } + // Name of the symbol. + case 2: + s.name = r.Bytes() + // Kernel module the symbol is provided by. + case 3: + s.mod = bytes.Trim(r.Bytes(), "[]") + // Ignore any future fields. 
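Reviewer note: the new kallsyms lookup surface above boils down to two cached, batched lookups plus single-symbol wrappers. A minimal sketch of the intended call shape (kallsyms is internal to cilium/ebpf, so this compiles only from within the module; the symbol names are examples, not anything this change depends on):

package main

import (
	"fmt"

	"github.com/cilium/ebpf/internal/kallsyms"
)

func main() {
	// Resolve the providing module for several symbols in one pass over
	// /proc/kallsyms; values stay "" for vmlinux-builtin or missing symbols.
	mods := map[string]string{"nf_nat_init": "", "tcp_v4_connect": ""}
	if err := kallsyms.AssignModules(mods); err != nil {
		fmt.Println(err) // wraps errAmbiguousKsym if a name matches more than one symbol
		return
	}
	fmt.Println(mods["nf_nat_init"]) // e.g. "nf_nat"

	// The single-symbol wrappers share the same cache.
	addr, _ := kallsyms.Address("bpf_prog_put")
	fmt.Printf("%#x\n", addr) // 0 if the symbol is absent from the running kernel
}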
+ default: + return + } + } + + return } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go new file mode 100644 index 000000000000..3011e83f6816 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go @@ -0,0 +1,89 @@ +package kallsyms + +import ( + "bufio" + "bytes" + "io" +) + +// reader is a line and word-oriented reader built for reading /proc/kallsyms. +// It takes an io.Reader and iterates its contents line by line, then word by +// word. +// +// It's designed to allow partial reading of lines without paying the cost of +// allocating objects that will never be accessed, resulting in less work for +// the garbage collector. +type reader struct { + s *bufio.Scanner + line []byte + word []byte + + err error +} + +func newReader(r io.Reader) *reader { + return &reader{ + s: bufio.NewScanner(r), + } +} + +// Bytes returns the current word as a byte slice. +func (r *reader) Bytes() []byte { + return r.word +} + +// Text returns the output of Bytes as a string. +func (r *reader) Text() string { + return string(r.Bytes()) +} + +// Line advances the reader to the next line in the input. Calling Line resets +// the current word, making [reader.Bytes] and [reader.Text] return empty +// values. Follow this up with a call to [reader.Word]. +// +// Like [bufio.Scanner], [reader.Err] needs to be checked after Line returns +// false to determine if an error occurred during reading. +// +// Returns true if Line can be called again. Returns false if all lines in the +// input have been read. +func (r *reader) Line() bool { + for r.s.Scan() { + line := r.s.Bytes() + if len(line) == 0 { + continue + } + + r.line = line + r.word = nil + + return true + } + if err := r.s.Err(); err != nil { + r.err = err + } + + return false +} + +// Word advances the reader to the next word in the current line. +// +// Returns true if a word is found and Word should be called again. Returns +// false when all words on the line have been read. +func (r *reader) Word() bool { + line := bytes.TrimSpace(r.line) + + if len(line) == 0 { + return false + } + + var found bool + r.word, r.line, found = bytes.Cut(line, []byte{' '}) + if !found { + r.word, r.line, _ = bytes.Cut(line, []byte{'\t'}) + } + return true +} + +func (r *reader) Err() error { + return r.err +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go index 1921e4f15ad7..29c62b6266ed 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go @@ -1,3 +1,4 @@ +// Package kconfig implements a parser for the format of Linux's .config file. package kconfig import ( @@ -7,7 +8,6 @@ import ( "fmt" "io" "math" - "os" "strconv" "strings" @@ -15,30 +15,6 @@ import ( "github.com/cilium/ebpf/internal" ) -// Find find a kconfig file on the host. -// It first reads from /boot/config- of the current running kernel and tries -// /proc/config.gz if nothing was found in /boot. -// If none of the file provide a kconfig, it returns an error. 
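The allocation-avoiding reader added above is easiest to follow from its call pattern; a short in-package sketch (both the type and constructor are unexported):

r := newReader(f) // f is any io.Reader over kallsyms-style text
for r.Line() {
	for r.Word() {
		_ = r.Bytes() // current word; r.Text() copies it into a string
	}
}
if err := r.Err(); err != nil {
	return fmt.Errorf("reading kallsyms: %w", err)
}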
-func Find() (*os.File, error) { - kernelRelease, err := internal.KernelRelease() - if err != nil { - return nil, fmt.Errorf("cannot get kernel release: %w", err) - } - - path := "/boot/config-" + kernelRelease - f, err := os.Open(path) - if err == nil { - return f, nil - } - - f, err = os.Open("/proc/config.gz") - if err == nil { - return f, nil - } - - return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path) -} - // Parse parses the kconfig file for which a reader is given. // All the CONFIG_* which are in filter and which are set set will be // put in the returned map as key with their corresponding value as map value. @@ -127,12 +103,13 @@ func PutValue(data []byte, typ btf.Type, value string) error { switch value { case "y", "n", "m": return putValueTri(data, typ, value) - default: - if strings.HasPrefix(value, `"`) { - return putValueString(data, typ, value) - } - return putValueNumber(data, typ, value) } + + if strings.HasPrefix(value, `"`) { + return putValueString(data, typ, value) + } + + return putValueNumber(data, typ, value) } // Golang translation of libbpf_tristate enum: @@ -169,6 +146,10 @@ func putValueTri(data []byte, typ btf.Type, value string) error { return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name) } + if len(data) != 4 { + return fmt.Errorf("expected enum value to occupy 4 bytes in datasec, got: %d", len(data)) + } + var tri triState switch value { case "y": @@ -178,10 +159,10 @@ func putValueTri(data []byte, typ btf.Type, value string) error { case "n": tri = TriNo default: - return fmt.Errorf("value %q is not support for libbpf_tristate", value) + return fmt.Errorf("value %q is not supported for libbpf_tristate", value) } - internal.NativeEndian.PutUint64(data, uint64(tri)) + internal.NativeEndian.PutUint32(data, uint32(tri)) default: return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/auxv.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/auxv.go similarity index 65% rename from src/runtime/vendor/github.com/cilium/ebpf/internal/auxv.go rename to src/runtime/vendor/github.com/cilium/ebpf/internal/linux/auxv.go index 45fd0d37f132..a864d6b4a9dc 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/auxv.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/auxv.go @@ -1,9 +1,12 @@ -package internal +package linux import ( - "errors" + "fmt" "io" - _ "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/unix" ) type auxvPairReader interface { @@ -17,11 +20,8 @@ const ( _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image ) -//go:linkname runtime_getAuxv runtime.getAuxv -func runtime_getAuxv() []uintptr - type auxvRuntimeReader struct { - data []uintptr + data [][2]uintptr index int } @@ -37,20 +37,23 @@ func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) { // we manually add the (_AT_NULL, _AT_NULL) pair at the end // that is not provided by the go runtime var tag, value uintptr - if r.index+1 < len(r.data) { - tag, value = r.data[r.index], r.data[r.index+1] + if r.index < len(r.data) { + tag, value = r.data[r.index][0], r.data[r.index][1] } else { tag, value = _AT_NULL, _AT_NULL } - r.index += 2 + r.index += 1 return uint64(tag), uint64(value), nil } 
func newAuxvRuntimeReader() (auxvPairReader, error) { - data := runtime_getAuxv() + if !platform.IsLinux { + return nil, fmt.Errorf("read auxv from runtime: %w", internal.ErrNotSupportedOnOS) + } - if len(data)%2 != 0 { - return nil, errors.New("malformed auxv passed from runtime") + data, err := unix.Auxv() + if err != nil { + return nil, fmt.Errorf("read auxv from runtime: %w", err) } return &auxvRuntimeReader{ diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/cpu.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/cpu.go new file mode 100644 index 000000000000..bd55ac915e12 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/cpu.go @@ -0,0 +1,45 @@ +package linux + +import ( + "fmt" + "os" + "strings" +) + +func ParseCPUsFromFile(path string) (int, error) { + spec, err := os.ReadFile(path) + if err != nil { + return 0, err + } + + n, err := parseCPUs(string(spec)) + if err != nil { + return 0, fmt.Errorf("can't parse %s: %v", path, err) + } + + return n, nil +} + +// parseCPUs parses the number of cpus from a string produced +// by bitmap_list_string() in the Linux kernel. +// Multiple ranges are rejected, since they can't be unified +// into a single number. +// This is the format of /sys/devices/system/cpu/possible, it +// is not suitable for /sys/devices/system/cpu/online, etc. +func parseCPUs(spec string) (int, error) { + if strings.Trim(spec, "\n") == "0" { + return 1, nil + } + + var low, high int + n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high) + if n != 2 || err != nil { + return 0, fmt.Errorf("invalid format: %s", spec) + } + if low != 0 { + return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec) + } + + // cpus is 0 indexed + return high + 1, nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/doc.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/doc.go new file mode 100644 index 000000000000..064e75437d83 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/doc.go @@ -0,0 +1,2 @@ +// Package linux contains OS specific wrappers around package unix. +package linux diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go new file mode 100644 index 000000000000..1488ecb35c36 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go @@ -0,0 +1,31 @@ +package linux + +import ( + "fmt" + "os" +) + +// FindKConfig searches for a kconfig file on the host. +// +// It first reads from /boot/config- of the current running kernel and tries +// /proc/config.gz if nothing was found in /boot. +// If none of the file provide a kconfig, it returns an error. 
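Worked example of the accepted /sys/devices/system/cpu/possible format, since the single-range restriction is easy to miss (in-package sketch):

n, err := parseCPUs("0-7\n") // typical multi-CPU contents
// n == 8, err == nil: the range is zero-based and inclusive.

n, err = parseCPUs("0\n") // single-CPU systems report just "0"
// n == 1, err == nil.

n, err = parseCPUs("0-3,8-11\n")
// err != nil: multiple ranges cannot be collapsed into one count.
_, _ = n, err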
+func FindKConfig() (*os.File, error) { + kernelRelease, err := KernelRelease() + if err != nil { + return nil, fmt.Errorf("cannot get kernel release: %w", err) + } + + path := "/boot/config-" + kernelRelease + f, err := os.Open(path) + if err == nil { + return f, nil + } + + f, err = os.Open("/proc/config.gz") + if err == nil { + return f, nil + } + + return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/platform.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/platform.go similarity index 97% rename from src/runtime/vendor/github.com/cilium/ebpf/internal/platform.go rename to src/runtime/vendor/github.com/cilium/ebpf/internal/linux/platform.go index 6e90f2ef7148..39bdcc51f9a3 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/platform.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/platform.go @@ -1,4 +1,4 @@ -package internal +package linux import ( "runtime" diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/statfs.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/statfs.go similarity index 96% rename from src/runtime/vendor/github.com/cilium/ebpf/internal/statfs.go rename to src/runtime/vendor/github.com/cilium/ebpf/internal/linux/statfs.go index 44c02d676e6d..e268c06fab6c 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/statfs.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/statfs.go @@ -1,4 +1,4 @@ -package internal +package linux import ( "unsafe" diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/vdso.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/vdso.go similarity index 93% rename from src/runtime/vendor/github.com/cilium/ebpf/internal/vdso.go rename to src/runtime/vendor/github.com/cilium/ebpf/internal/linux/vdso.go index 1049278554e0..1d8d0ef6b110 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/vdso.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/vdso.go @@ -1,4 +1,4 @@ -package internal +package linux import ( "debug/elf" @@ -9,6 +9,7 @@ import ( "math" "os" + "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/unix" ) @@ -82,7 +83,7 @@ type elfNoteHeader struct { // vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in // the ELF notes section of the binary provided by the reader. func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { - hdr, err := NewSafeELFFile(r) + hdr, err := internal.NewSafeELFFile(r) if err != nil { return 0, fmt.Errorf("reading vDSO ELF: %w", err) } @@ -110,7 +111,7 @@ func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { var name string if n.NameSize > 0 { // Read the note name, aligned to 4 bytes. - buf := make([]byte, Align(n.NameSize, 4)) + buf := make([]byte, internal.Align(n.NameSize, 4)) if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { return 0, fmt.Errorf("reading note name: %w", err) } @@ -132,7 +133,7 @@ func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { } // Discard the note descriptor if it exists but we're not interested in it. 
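Call sites that previously used kconfig.Find now go through linux.FindKConfig; the behaviour is unchanged. A sketch (both packages are internal):

f, err := linux.FindKConfig()
if err != nil {
	return err // neither /boot/config-<release> nor /proc/config.gz was readable
}
defer f.Close()
// Note that the fallback /proc/config.gz is gzip-compressed.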
- if _, err := io.CopyN(io.Discard, sr, int64(Align(n.DescSize, 4))); err != nil { + if _, err := io.CopyN(io.Discard, sr, int64(internal.Align(n.DescSize, 4))); err != nil { return 0, err } } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/version.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/version.go new file mode 100644 index 000000000000..798dd3fed02d --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/linux/version.go @@ -0,0 +1,34 @@ +package linux + +import ( + "fmt" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// KernelVersion returns the version of the currently running kernel. +var KernelVersion = sync.OnceValues(detectKernelVersion) + +// detectKernelVersion returns the version of the running kernel. +func detectKernelVersion() (internal.Version, error) { + vc, err := vdsoVersion() + if err != nil { + return internal.Version{}, err + } + return internal.NewVersionFromCode(vc), nil +} + +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. +func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) + } + + return unix.ByteSliceToString(uname.Release[:]), nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/math.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/math.go index e95c8efde51a..10cde66860d6 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/math.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/math.go @@ -1,13 +1,33 @@ package internal -import "golang.org/x/exp/constraints" - // Align returns 'n' updated to 'alignment' boundary. -func Align[I constraints.Integer](n, alignment I) I { +func Align[I Integer](n, alignment I) I { return (n + alignment - 1) / alignment * alignment } // IsPow returns true if n is a power of two. -func IsPow[I constraints.Integer](n I) bool { +func IsPow[I Integer](n I) bool { return n != 0 && (n&(n-1)) == 0 } + +// Between returns the value clamped between a and b. +func Between[I Integer](val, a, b I) I { + lower, upper := a, b + if lower > upper { + upper, lower = a, b + } + + val = min(val, upper) + return max(val, lower) +} + +// Integer represents all possible integer types. +// Remove when x/exp/constraints is moved to the standard library. +type Integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// List of integer types known by the Go compiler. Used by TestIntegerConstraint +// to warn if a new integer type is introduced. Remove when x/exp/constraints +// is moved to the standard library. 
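The new clamp helper and the relocated kernel-release lookup, shown with concrete values (a sketch; the bounds for Between may be passed in either order):

internal.Between(10, 0, 5) // == 5
internal.Between(-3, 0, 5) // == 0
internal.Between(3, 5, 0)  // == 3: swapped bounds are normalised first

release, err := linux.KernelRelease() // e.g. "5.15.17-1-lts", via uname(2)
_, _ = release, err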
+var integers = []string{"int", "int8", "int16", "int32", "int64", "uint", "uint8", "uint16", "uint32", "uint64", "uintptr"} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/output.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/output.go index dd6e6cbafe0e..bcbb6818df77 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/output.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/output.go @@ -92,6 +92,11 @@ func GoTypeName(t any) string { for rT.Kind() == reflect.Pointer { rT = rT.Elem() } - // Doesn't return the correct Name for generic types due to https://github.com/golang/go/issues/55924 - return rT.Name() + + name := rT.Name() + if pkgPath := rT.PkgPath(); pkgPath != "" { + name = strings.ReplaceAll(name, pkgPath+".", "") + } + + return name } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/constants.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/constants.go new file mode 100644 index 000000000000..b57ae1e59fdb --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/constants.go @@ -0,0 +1,62 @@ +package platform + +import "fmt" + +// Values used to tag platform specific constants. +// +// The value for Linux is zero so that existing constants do not change. +const ( + LinuxTag = uint32(iota) << platformShift + WindowsTag +) + +const ( + platformMax = 1<<3 - 1 // most not exceed 3 bits to avoid setting the high bit + platformShift = 28 + platformMask = platformMax << platformShift +) + +func tagForPlatform(platform string) (uint32, error) { + switch platform { + case Linux: + return LinuxTag, nil + case Windows: + return WindowsTag, nil + default: + return 0, fmt.Errorf("unrecognized platform: %s", platform) + } +} + +func platformForConstant(c uint32) string { + tag := uint32(c & platformMask) + switch tag { + case LinuxTag: + return Linux + case WindowsTag: + return Windows + default: + return "" + } +} + +// Encode a platform and a value into a tagged constant. +// +// Returns an error if platform is unknown or c is out of bounds. +func EncodeConstant[T ~uint32](platform string, c uint32) (T, error) { + if c>>platformShift > 0 { + return 0, fmt.Errorf("invalid constant 0x%x", c) + } + + tag, err := tagForPlatform(platform) + if err != nil { + return 0, err + } + + return T(tag | c), nil +} + +// Decode a platform and a value from a tagged constant. +func DecodeConstant[T ~uint32](c T) (string, uint32) { + v := uint32(c) & ^uint32(platformMask) + return platformForConstant(uint32(c)), v +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform.go new file mode 100644 index 000000000000..1c5bad396f56 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform.go @@ -0,0 +1,42 @@ +package platform + +import ( + "errors" + "runtime" + "strings" +) + +const ( + Linux = "linux" + Windows = "windows" +) + +const ( + IsLinux = runtime.GOOS == "linux" + IsWindows = runtime.GOOS == "windows" +) + +// SelectVersion extracts the platform-appropriate version from a list of strings like +// `linux:6.1` or `windows:0.20.0`. +// +// Returns an empty string and nil if no version matched or an error if no strings were passed. 
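The constant-tagging scheme above is clearer as a round trip. A sketch using a bare uint32, since any ~uint32 constant type works; the value 7 is arbitrary:

c, err := platform.EncodeConstant[uint32](platform.Windows, 7)
if err != nil {
	return err // the raw value must fit below bit 28
}
// c == 0x10000007; Linux-tagged constants keep their original value because LinuxTag is zero.

goos, v := platform.DecodeConstant(c)
// goos == "windows", v == 7
_, _ = goos, v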
+func SelectVersion(versions []string) (string, error) { + const prefix = runtime.GOOS + ":" + + if len(versions) == 0 { + return "", errors.New("no versions specified") + } + + for _, version := range versions { + if after, ok := strings.CutPrefix(version, prefix); ok { + return after, nil + } + + if IsLinux && !strings.ContainsRune(version, ':') { + // Allow version numbers without a GOOS prefix on Linux. + return version, nil + } + } + + return "", nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go new file mode 100644 index 000000000000..f0aa240dc2a6 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go @@ -0,0 +1,3 @@ +package platform + +const Native = Linux diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go new file mode 100644 index 000000000000..cd33b3f6858d --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go @@ -0,0 +1,5 @@ +//go:build !linux && !windows + +package platform + +const Native = "" diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go new file mode 100644 index 000000000000..26b4a8ecb3a4 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go @@ -0,0 +1,3 @@ +package platform + +const Native = Windows diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd.go index 941a56fb91b1..7e769fb54e85 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -1,31 +1,25 @@ package sys import ( - "fmt" "math" - "os" "runtime" "strconv" + "github.com/cilium/ebpf/internal/testutils/testmain" "github.com/cilium/ebpf/internal/unix" ) var ErrClosedFd = unix.EBADF -type FD struct { - raw int -} +// A value for an invalid fd. +// +// Luckily this is consistent across Linux and Windows. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/54632eb360c560ebef2f173be1a4a4625d540744/include/ebpf_api.h#L25 +const invalidFd = -1 func newFD(value int) *FD { - if onLeakFD != nil { - // Attempt to store the caller's stack for the given fd value. - // Panic if fds contains an existing stack for the fd. - old, exist := fds.LoadOrStore(value, callersFrames()) - if exist { - f := old.(*runtime.Frames) - panic(fmt.Sprintf("found existing stack for fd %d:\n%s", value, FormatFrames(f))) - } - } + testmain.TraceFD(value, 1) fd := &FD{value} runtime.SetFinalizer(fd, (*FD).finalize) @@ -35,51 +29,25 @@ func newFD(value int) *FD { // finalize is set as the FD's runtime finalizer and // sends a leak trace before calling FD.Close(). func (fd *FD) finalize() { - if fd.raw < 0 { + if fd.raw == invalidFd { return } - // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback - // is invoked at most once for one sys.FD allocation, runtime.Frames can only - // be unwound once. 
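SelectVersion in practice (sketch; the version strings are examples):

v, err := platform.SelectVersion([]string{"windows:0.20.0", "linux:6.1"})
// On Linux v == "6.1", on Windows v == "0.20.0"; err is only non-nil for an empty slice.

v, err = platform.SelectVersion([]string{"6.1"})
// Bare versions without a GOOS prefix are still accepted, but only on Linux.
_, _ = v, err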
- f, ok := fds.LoadAndDelete(fd.Int()) - if ok && onLeakFD != nil { - onLeakFD(f.(*runtime.Frames)) - } + testmain.LeakFD(fd.raw) _ = fd.Close() } -// NewFD wraps a raw fd with a finalizer. -// -// You must not use the raw fd after calling this function, since the underlying -// file descriptor number may change. This is because the BPF UAPI assumes that -// zero is not a valid fd value. -func NewFD(value int) (*FD, error) { - if value < 0 { - return nil, fmt.Errorf("invalid fd %d", value) - } - - fd := newFD(value) - if value != 0 { - return fd, nil - } - - dup, err := fd.Dup() - _ = fd.Close() - return dup, err -} - func (fd *FD) String() string { return strconv.FormatInt(int64(fd.raw), 10) } func (fd *FD) Int() int { - return fd.raw + return int(fd.raw) } func (fd *FD) Uint() uint32 { - if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + if fd.raw == invalidFd { // Best effort: this is the number most likely to be an invalid file // descriptor. It is equal to -1 (on two's complement arches). return math.MaxUint32 @@ -87,47 +55,14 @@ func (fd *FD) Uint() uint32 { return uint32(fd.raw) } -func (fd *FD) Close() error { - if fd.raw < 0 { - return nil - } - - return unix.Close(fd.disown()) -} - -func (fd *FD) disown() int { - value := int(fd.raw) - fds.Delete(int(value)) - fd.raw = -1 +// Disown destroys the FD and returns its raw file descriptor without closing +// it. After this call, the underlying fd is no longer tied to the FD's +// lifecycle. +func (fd *FD) Disown() int { + value := fd.raw + testmain.ForgetFD(value) + fd.raw = invalidFd runtime.SetFinalizer(fd, nil) return value } - -func (fd *FD) Dup() (*FD, error) { - if fd.raw < 0 { - return nil, ErrClosedFd - } - - // Always require the fd to be larger than zero: the BPF API treats the value - // as "no argument provided". - dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) - if err != nil { - return nil, fmt.Errorf("can't dup fd: %v", err) - } - - return newFD(dup), nil -} - -// File takes ownership of FD and turns it into an [*os.File]. -// -// You must not use the FD after the call returns. -// -// Returns nil if the FD is not valid. -func (fd *FD) File(name string) *os.File { - if fd.raw < 0 { - return nil - } - - return os.NewFile(uintptr(fd.disown()), name) -} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go new file mode 100644 index 000000000000..47057395ec6d --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go @@ -0,0 +1,70 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "os" + + "github.com/cilium/ebpf/internal/unix" +) + +type FD struct { + raw int +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. 
+func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + return unix.Close(fd.Disown()) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". + dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +// File takes ownership of FD and turns it into an [*os.File]. +// +// You must not use the FD after the call returns. +// +// Returns [ErrClosedFd] if the fd is not valid. +func (fd *FD) File(name string) (*os.File, error) { + if fd.raw == invalidFd { + return nil, ErrClosedFd + } + + return os.NewFile(uintptr(fd.Disown()), name), nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go deleted file mode 100644 index cd50dd1f6428..000000000000 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go +++ /dev/null @@ -1,93 +0,0 @@ -package sys - -import ( - "bytes" - "fmt" - "runtime" - "sync" -) - -// OnLeakFD controls tracing [FD] lifetime to detect resources that are not -// closed by Close(). -// -// If fn is not nil, tracing is enabled for all FDs created going forward. fn is -// invoked for all FDs that are closed by the garbage collector instead of an -// explicit Close() by a caller. Calling OnLeakFD twice with a non-nil fn -// (without disabling tracing in the meantime) will cause a panic. -// -// If fn is nil, tracing will be disabled. Any FDs that have not been closed are -// considered to be leaked, fn will be invoked for them, and the process will be -// terminated. -// -// fn will be invoked at most once for every unique sys.FD allocation since a -// runtime.Frames can only be unwound once. -func OnLeakFD(fn func(*runtime.Frames)) { - // Enable leak tracing if new fn is provided. - if fn != nil { - if onLeakFD != nil { - panic("OnLeakFD called twice with non-nil fn") - } - - onLeakFD = fn - return - } - - // fn is nil past this point. - - if onLeakFD == nil { - return - } - - // Call onLeakFD for all open fds. - if fs := flushFrames(); len(fs) != 0 { - for _, f := range fs { - onLeakFD(f) - } - } - - onLeakFD = nil -} - -var onLeakFD func(*runtime.Frames) - -// fds is a registry of all file descriptors wrapped into sys.fds that were -// created while an fd tracer was active. -var fds sync.Map // map[int]*runtime.Frames - -// flushFrames removes all elements from fds and returns them as a slice. This -// deals with the fact that a runtime.Frames can only be unwound once using -// Next(). -func flushFrames() []*runtime.Frames { - var frames []*runtime.Frames - fds.Range(func(key, value any) bool { - frames = append(frames, value.(*runtime.Frames)) - fds.Delete(key) - return true - }) - return frames -} - -func callersFrames() *runtime.Frames { - c := make([]uintptr, 32) - - // Skip runtime.Callers and this function. - i := runtime.Callers(2, c) - if i == 0 { - return nil - } - - return runtime.CallersFrames(c) -} - -// FormatFrames formats a runtime.Frames as a human-readable string. 
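The user-visible changes in the FD rework above: disown is exported as Disown, and File now reports an error instead of silently returning a nil *os.File. A call-site sketch for the non-Windows implementation:

fd, err := sys.NewFD(raw) // raw fd 0 is dup'd away; negative values are rejected
if err != nil {
	return err
}

f, err := fd.File("bpf-object") // takes ownership; fd must not be used afterwards
if err != nil {
	return err // ErrClosedFd, where the old signature returned a bare nil
}
defer f.Close()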
-func FormatFrames(fs *runtime.Frames) string { - var b bytes.Buffer - for { - f, more := fs.Next() - b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line)) - if !more { - break - } - } - return b.String() -} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go new file mode 100644 index 000000000000..f3927638233b --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go @@ -0,0 +1,58 @@ +package sys + +import ( + "fmt" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" +) + +// FD wraps a handle which is managed by the eBPF for Windows runtime. +// +// It is not equivalent to a real file descriptor or handle. +type FD struct { + raw int +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function. +func NewFD(value int) (*FD, error) { + if value == invalidFd { + return nil, fmt.Errorf("invalid fd %d", value) + } + + if value == 0 { + // The efW runtime never uses zero fd it seems. No need to dup it. + return nil, fmt.Errorf("invalid zero fd") + } + + return newFD(value), nil +} + +func (fd *FD) Close() error { + if fd.raw == invalidFd { + return nil + } + + return efw.EbpfCloseFd(fd.Disown()) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw == invalidFd { + return nil, ErrClosedFd + } + + dup, err := efw.EbpfDuplicateFd(fd.raw) + if err != nil { + return nil, err + } + + return NewFD(int(dup)) +} + +// File is not implemented. +func (fd *FD) File(name string) (*os.File, error) { + return nil, fmt.Errorf("file from fd: %w", internal.ErrNotSupportedOnOS) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go deleted file mode 100644 index d9fe217222bf..000000000000 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by "stringer -type MapFlags"; DO NOT EDIT. - -package sys - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[BPF_F_NO_PREALLOC-1] - _ = x[BPF_F_NO_COMMON_LRU-2] - _ = x[BPF_F_NUMA_NODE-4] - _ = x[BPF_F_RDONLY-8] - _ = x[BPF_F_WRONLY-16] - _ = x[BPF_F_STACK_BUILD_ID-32] - _ = x[BPF_F_ZERO_SEED-64] - _ = x[BPF_F_RDONLY_PROG-128] - _ = x[BPF_F_WRONLY_PROG-256] - _ = x[BPF_F_CLONE-512] - _ = x[BPF_F_MMAPABLE-1024] - _ = x[BPF_F_PRESERVE_ELEMS-2048] - _ = x[BPF_F_INNER_MAP-4096] - _ = x[BPF_F_LINK-8192] - _ = x[BPF_F_PATH_FD-16384] -} - -const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAPBPF_F_LINKBPF_F_PATH_FD" - -var _MapFlags_map = map[MapFlags]string{ - 1: _MapFlags_name[0:17], - 2: _MapFlags_name[17:36], - 4: _MapFlags_name[36:51], - 8: _MapFlags_name[51:63], - 16: _MapFlags_name[63:75], - 32: _MapFlags_name[75:95], - 64: _MapFlags_name[95:110], - 128: _MapFlags_name[110:127], - 256: _MapFlags_name[127:144], - 512: _MapFlags_name[144:155], - 1024: _MapFlags_name[155:169], - 2048: _MapFlags_name[169:189], - 4096: _MapFlags_name[189:204], - 8192: _MapFlags_name[204:214], - 16384: _MapFlags_name[214:227], -} - -func (i MapFlags) String() string { - if str, ok := _MapFlags_map[i]; ok { - return str - } - return "MapFlags(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/pinning.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go similarity index 77% rename from src/runtime/vendor/github.com/cilium/ebpf/internal/pinning.go rename to src/runtime/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go index 01d892f93444..96ad43abd3e2 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/pinning.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go @@ -1,4 +1,6 @@ -package internal +//go:build !windows + +package sys import ( "errors" @@ -7,11 +9,11 @@ import ( "path/filepath" "runtime" - "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/linux" "github.com/cilium/ebpf/internal/unix" ) -func Pin(currentPath, newPath string, fd *sys.FD) error { +func Pin(currentPath, newPath string, fd *FD) error { if newPath == "" { return errors.New("given pinning path cannot be empty") } @@ -19,7 +21,7 @@ func Pin(currentPath, newPath string, fd *sys.FD) error { return nil } - fsType, err := FSType(filepath.Dir(newPath)) + fsType, err := linux.FSType(filepath.Dir(newPath)) if err != nil { return err } @@ -30,8 +32,8 @@ func Pin(currentPath, newPath string, fd *sys.FD) error { defer runtime.KeepAlive(fd) if currentPath == "" { - return sys.ObjPin(&sys.ObjPinAttr{ - Pathname: sys.NewStringPointer(newPath), + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), BpfFd: fd.Uint(), }) } @@ -47,8 +49,8 @@ func Pin(currentPath, newPath string, fd *sys.FD) error { return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) } // Internal state not in sync with the file system so let's fix it. 
- return sys.ObjPin(&sys.ObjPinAttr{ - Pathname: sys.NewStringPointer(newPath), + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), BpfFd: fd.Uint(), }) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go new file mode 100644 index 000000000000..c8ab685500e6 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go @@ -0,0 +1,44 @@ +package sys + +import ( + "errors" + "runtime" + + "github.com/cilium/ebpf/internal/efw" +) + +func Pin(currentPath, newPath string, fd *FD) error { + defer runtime.KeepAlive(fd) + + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + + if currentPath == "" { + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) + } + + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + + err := efw.EbpfObjectUnpin(pinnedPath) + if err != nil && !errors.Is(err, efw.EBPF_KEY_NOT_FOUND) { + return err + } + + return nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr.go index e9bb59059730..173665c2a9dc 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -6,35 +6,53 @@ import ( "github.com/cilium/ebpf/internal/unix" ) -// NewPointer creates a 64-bit pointer from an unsafe Pointer. -func NewPointer(ptr unsafe.Pointer) Pointer { +// UnsafePointer creates a 64-bit pointer from an unsafe Pointer. +func UnsafePointer(ptr unsafe.Pointer) Pointer { return Pointer{ptr: ptr} } -// NewSlicePointer creates a 64-bit pointer from a byte slice. -func NewSlicePointer(buf []byte) Pointer { +// UnsafeSlicePointer creates an untyped [Pointer] from a slice. +func UnsafeSlicePointer[T comparable](buf []T) Pointer { if len(buf) == 0 { return Pointer{} } - return Pointer{ptr: unsafe.Pointer(&buf[0])} + return Pointer{ptr: unsafe.Pointer(unsafe.SliceData(buf))} } -// NewSlicePointerLen creates a 64-bit pointer from a byte slice. +// TypedPointer points to typed memory. // -// Useful to assign both the pointer and the length in one go. -func NewSlicePointerLen(buf []byte) (Pointer, uint32) { - return NewSlicePointer(buf), uint32(len(buf)) +// It is like a *T except that it accounts for the BPF syscall interface. +type TypedPointer[T any] struct { + _ [0]*T // prevent TypedPointer[a] to be convertible to TypedPointer[b] + ptr Pointer } -// NewStringPointer creates a 64-bit pointer from a string. -func NewStringPointer(str string) Pointer { - p, err := unix.BytePtrFromString(str) +// SlicePointer creates a [TypedPointer] from a slice. +func SlicePointer[T comparable](s []T) TypedPointer[T] { + return TypedPointer[T]{ptr: UnsafeSlicePointer(s)} +} + +// StringPointer points to a null-terminated string. +type StringPointer struct { + _ [0]string + ptr Pointer +} + +// NewStringPointer creates a [StringPointer] from a string. 
+func NewStringPointer(str string) StringPointer { + slice, err := unix.ByteSliceFromString(str) if err != nil { - return Pointer{} + return StringPointer{} } - return Pointer{ptr: unsafe.Pointer(p)} + return StringPointer{ptr: Pointer{ptr: unsafe.Pointer(&slice[0])}} +} + +// StringSlicePointer points to a slice of [StringPointer]. +type StringSlicePointer struct { + _ [0][]string + ptr Pointer } // NewStringSlicePointer allocates an array of Pointers to each string in the @@ -42,11 +60,11 @@ func NewStringPointer(str string) Pointer { // resulting array. // // Use this function to pass arrays of strings as syscall arguments. -func NewStringSlicePointer(strings []string) Pointer { - sp := make([]Pointer, 0, len(strings)) +func NewStringSlicePointer(strings []string) StringSlicePointer { + sp := make([]StringPointer, 0, len(strings)) for _, s := range strings { sp = append(sp, NewStringPointer(s)) } - return Pointer{ptr: unsafe.Pointer(&sp[0])} + return StringSlicePointer{ptr: Pointer{ptr: unsafe.Pointer(&sp[0])}} } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go index 6278c79c9ef1..0b0feeb7a6f4 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -3,12 +3,14 @@ package sys import ( + "structs" "unsafe" ) // Pointer wraps an unsafe.Pointer to be 64bit to // conform to the syscall specification. type Pointer struct { + structs.HostLayout pad uint32 ptr unsafe.Pointer } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go index c27b537e8e08..f9007fe84bcc 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -3,12 +3,14 @@ package sys import ( + "structs" "unsafe" ) // Pointer wraps an unsafe.Pointer to be 64bit to // conform to the syscall specification. type Pointer struct { + structs.HostLayout ptr unsafe.Pointer pad uint32 } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go index 2d7828230ae7..05196cca7444 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -3,11 +3,13 @@ package sys import ( + "structs" "unsafe" ) // Pointer wraps an unsafe.Pointer to be 64bit to // conform to the syscall specification. 
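Later hunks switch call sites to the renamed pointer constructors; the mapping in one place (sketch):

buf := make([]byte, 16)

p := sys.UnsafeSlicePointer(buf)     // was sys.NewSlicePointer; still an untyped sys.Pointer
tp := sys.SlicePointer(buf)          // typed: sys.TypedPointer[byte]
sp := sys.NewStringPointer("pinned") // now a sys.StringPointer rather than a sys.Pointer
_, _, _ = p, tp, sp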
type Pointer struct { + structs.HostLayout ptr unsafe.Pointer } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/signals.go index e5337191d69d..e75e96052049 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/signals.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/signals.go @@ -1,3 +1,5 @@ +//go:build !windows + package sys import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall.go index f6b6e9345802..f2fffd26b786 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -2,7 +2,6 @@ package sys import ( "runtime" - "syscall" "unsafe" "github.com/cilium/ebpf/internal/unix" @@ -11,37 +10,7 @@ import ( // ENOTSUPP is a Linux internal error code that has leaked into UAPI. // // It is not the same as ENOTSUP or EOPNOTSUPP. -const ENOTSUPP = syscall.Errno(524) - -// BPF wraps SYS_BPF. -// -// Any pointers contained in attr must use the Pointer type from this package. -func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { - // Prevent the Go profiler from repeatedly interrupting the verifier, - // which could otherwise lead to a livelock due to receiving EAGAIN. - if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN { - maskProfilerSignal() - defer unmaskProfilerSignal() - } - - for { - r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) - runtime.KeepAlive(attr) - - // As of ~4.20 the verifier can be interrupted by a signal, - // and returns EAGAIN in that case. - if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { - continue - } - - var err error - if errNo != 0 { - err = wrappedErrno{errNo} - } - - return r1, err - } -} +const ENOTSUPP = unix.Errno(524) // Info is implemented by all structs that can be passed to the ObjInfo syscall. // @@ -125,7 +94,7 @@ func ObjInfo(fd *FD, info Info) error { err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ BpfFd: fd.Uint(), InfoLen: len, - Info: NewPointer(ptr), + Info: UnsafePointer(ptr), }) runtime.KeepAlive(fd) return err @@ -133,12 +102,12 @@ func ObjInfo(fd *FD, info Info) error { // BPFObjName is a null-terminated string made up of // 'A-Za-z0-9_' characters. -type ObjName [unix.BPF_OBJ_NAME_LEN]byte +type ObjName [BPF_OBJ_NAME_LEN]byte // NewObjName truncates the result if it is too long. func NewObjName(name string) ObjName { var result ObjName - copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + copy(result[:BPF_OBJ_NAME_LEN-1], name) return result } @@ -151,6 +120,12 @@ const ( BPF_LOG_STATS ) +// MapID uniquely identifies a bpf_map. +type MapID uint32 + +// ProgramID uniquely identifies a bpf_map. +type ProgramID uint32 + // LinkID uniquely identifies a bpf_link. type LinkID uint32 @@ -160,29 +135,6 @@ type BTFID uint32 // TypeID identifies a type in a BTF blob. type TypeID uint32 -// MapFlags control map behaviour. 
-type MapFlags uint32 - -//go:generate go run golang.org/x/tools/cmd/stringer@latest -type MapFlags - -const ( - BPF_F_NO_PREALLOC MapFlags = 1 << iota - BPF_F_NO_COMMON_LRU - BPF_F_NUMA_NODE - BPF_F_RDONLY - BPF_F_WRONLY - BPF_F_STACK_BUILD_ID - BPF_F_ZERO_SEED - BPF_F_RDONLY_PROG - BPF_F_WRONLY_PROG - BPF_F_CLONE - BPF_F_MMAPABLE - BPF_F_PRESERVE_ELEMS - BPF_F_INNER_MAP - BPF_F_LINK - BPF_F_PATH_FD -) - // Flags used by bpf_mprog. const ( BPF_F_REPLACE = 1 << (iota + 2) @@ -192,12 +144,22 @@ const ( BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK ) -// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// Flags used by BPF_PROG_LOAD. +const ( + BPF_F_SLEEPABLE = 1 << 4 + BPF_F_XDP_HAS_FRAGS = 1 << 5 + BPF_F_XDP_DEV_BOUND_ONLY = 1 << 6 +) + +const BPF_TAG_SIZE = 8 +const BPF_OBJ_NAME_LEN = 16 + +// wrappedErrno wraps [unix.Errno] to prevent direct comparisons with // syscall.E* or unix.E* constants. // // You should never export an error of this type. type wrappedErrno struct { - syscall.Errno + unix.Errno } func (we wrappedErrno) Unwrap() error { @@ -213,10 +175,10 @@ func (we wrappedErrno) Error() string { type syscallError struct { error - errno syscall.Errno + errno unix.Errno } -func Error(err error, errno syscall.Errno) error { +func Error(err error, errno unix.Errno) error { return &syscallError{err, errno} } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go new file mode 100644 index 000000000000..b99e6e462d85 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go @@ -0,0 +1,84 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // Prevent the Go profiler from repeatedly interrupting the verifier, + // which could otherwise lead to a livelock due to receiving EAGAIN. + if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN { + maskProfilerSignal() + defer unmaskProfilerSignal() + } + + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// ObjGetTyped wraps [ObjGet] with a readlink call to extract the type of the +// underlying bpf object. +func ObjGetTyped(attr *ObjGetAttr) (*FD, ObjType, error) { + fd, err := ObjGet(attr) + if err != nil { + return nil, 0, err + } + + typ, err := readType(fd) + if err != nil { + _ = fd.Close() + return nil, 0, fmt.Errorf("reading fd type: %w", err) + } + + return fd, typ, nil +} + +// readType returns the bpf object type of the file descriptor by calling +// readlink(3). Returns an error if the file descriptor does not represent a bpf +// object. 
+func readType(fd *FD) (ObjType, error) { + s, err := os.Readlink(filepath.Join("/proc/self/fd/", fd.String())) + if err != nil { + return 0, fmt.Errorf("readlink fd %d: %w", fd.Int(), err) + } + + s = strings.TrimPrefix(s, "anon_inode:") + + switch s { + case "bpf-map": + return BPF_TYPE_MAP, nil + case "bpf-prog": + return BPF_TYPE_PROG, nil + case "bpf-link": + return BPF_TYPE_LINK, nil + } + + return 0, fmt.Errorf("unknown type %s of fd %d", s, fd.Int()) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go new file mode 100644 index 000000000000..08f73805c606 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go @@ -0,0 +1,69 @@ +package sys + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/unix" +) + +// BPF calls the BPF syscall wrapper in ebpfapi.dll. +// +// Any pointers contained in attr must use the Pointer type from this package. +// +// The implementation lives in https://github.com/microsoft/ebpf-for-windows/blob/main/libs/api/bpf_syscall.cpp +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // On Linux we need to guard against preemption by the profiler here. On + // Windows it seems like a cgocall may not be preempted: + // https://github.com/golang/go/blob/8b51146c698bcfcc2c2b73fa9390db5230f2ce0a/src/runtime/os_windows.go#L1240-L1246 + + addr, err := efw.BPF.Find() + if err != nil { + return 0, err + } + + // Using [LazyProc.Call] forces attr to escape, which isn't the case when using syscall.Syscall directly. + r1, _, lastError := syscall.SyscallN(addr, uintptr(cmd), uintptr(attr), size) + + if ret := int(efw.Int(r1)); ret < 0 { + errNo := unix.Errno(-ret) + if errNo == unix.EINVAL && lastError == windows.ERROR_CALL_NOT_IMPLEMENTED { + return 0, internal.ErrNotSupportedOnOS + } + return 0, wrappedErrno{errNo} + } + + return r1, nil +} + +// ObjGetTyped retrieves an pinned object and its type. 
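ObjGetTyped exists for both OSes with the same signature; only the type probe differs (readlink on Linux, EbpfObjectGetInfoByFd on Windows). A hedged sketch of the caller side; the ObjGetAttr construction is elided because its fields are not part of this hunk:

fd, typ, err := sys.ObjGetTyped(attr) // attr *sys.ObjGetAttr names the pinned path
if err != nil {
	return err
}
defer fd.Close()

switch typ {
case sys.BPF_TYPE_MAP, sys.BPF_TYPE_PROG, sys.BPF_TYPE_LINK:
	// dispatch on the pinned object's kind
default:
	// BPF_TYPE_UNSPEC is only returned on Windows, for unknown objects
}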
+func ObjGetTyped(attr *ObjGetAttr) (*FD, ObjType, error) { + fd, err := ObjGet(attr) + if err != nil { + return nil, 0, err + } + + efwType, err := efw.EbpfObjectGetInfoByFd(fd.Int(), nil, nil) + if err != nil { + _ = fd.Close() + return nil, 0, err + } + + switch efwType { + case efw.EBPF_OBJECT_UNKNOWN: + return fd, BPF_TYPE_UNSPEC, nil + case efw.EBPF_OBJECT_MAP: + return fd, BPF_TYPE_MAP, nil + case efw.EBPF_OBJECT_LINK: + return fd, BPF_TYPE_LINK, nil + case efw.EBPF_OBJECT_PROGRAM: + return fd, BPF_TYPE_PROG, nil + default: + return nil, 0, fmt.Errorf("unrecognized object type %v", efwType) + } +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/types.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/types.go index 70e754de71d4..2847f17ed924 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/types.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -3,9 +3,180 @@ package sys import ( + "structs" "unsafe" ) +const ( + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255 + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56 + BPF_ANY = 0 + BPF_CSUM_LEVEL_DEC = 2 + BPF_CSUM_LEVEL_INC = 1 + BPF_CSUM_LEVEL_QUERY = 0 + BPF_CSUM_LEVEL_RESET = 3 + BPF_EXIST = 2 + BPF_FIB_LKUP_RET_BLACKHOLE = 1 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8 + BPF_FIB_LKUP_RET_FWD_DISABLED = 5 + BPF_FIB_LKUP_RET_NOT_FWDED = 4 + BPF_FIB_LKUP_RET_NO_NEIGH = 7 + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9 + BPF_FIB_LKUP_RET_PROHIBIT = 3 + BPF_FIB_LKUP_RET_SUCCESS = 0 + BPF_FIB_LKUP_RET_UNREACHABLE = 2 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6 + BPF_FIB_LOOKUP_DIRECT = 1 + BPF_FIB_LOOKUP_MARK = 32 + BPF_FIB_LOOKUP_OUTPUT = 2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 4 + BPF_FIB_LOOKUP_SRC = 16 + BPF_FIB_LOOKUP_TBID = 8 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16 + BPF_F_ADJ_ROOM_FIXED_GSO = 1 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32 + BPF_F_BPRM_SECUREEXEC = 1 + BPF_F_BROADCAST = 8 + BPF_F_CLONE = 512 + BPF_F_CTXLEN_MASK = 4503595332403200 + BPF_F_CURRENT_CPU = 4294967295 + BPF_F_CURRENT_NETNS = 18446744073709551615 + BPF_F_DONT_FRAGMENT = 4 + BPF_F_EXCLUDE_INGRESS = 16 + BPF_F_FAST_STACK_CMP = 512 + BPF_F_GET_BRANCH_RECORDS_SIZE = 1 + BPF_F_HDR_FIELD_MASK = 15 + BPF_F_INDEX_MASK = 4294967295 + BPF_F_INGRESS = 1 + BPF_F_INNER_MAP = 4096 + BPF_F_INVALIDATE_HASH = 2 + BPF_F_KPROBE_MULTI_RETURN = 1 + BPF_F_LINK = 8192 + BPF_F_LOCK = 4 + BPF_F_MARK_ENFORCE = 64 + BPF_F_MARK_MANGLED_0 = 32 + BPF_F_MMAPABLE = 1024 + BPF_F_NEIGH = 2 + BPF_F_NEXTHOP = 8 + BPF_F_NO_COMMON_LRU = 2 + BPF_F_NO_PREALLOC = 1 + BPF_F_NO_TUNNEL_KEY = 16 + BPF_F_NO_USER_CONV = 262144 + BPF_F_NUMA_NODE = 4 + BPF_F_PATH_FD = 16384 + BPF_F_PEER = 4 + BPF_F_PRESERVE_ELEMS = 2048 + BPF_F_PSEUDO_HDR = 16 + BPF_F_RDONLY = 8 + BPF_F_RDONLY_PROG = 128 + BPF_F_RECOMPUTE_CSUM = 1 + BPF_F_REUSE_STACKID = 1024 + BPF_F_SEGV_ON_FAULT = 131072 + BPF_F_SEQ_NUMBER = 8 + BPF_F_SKIP_FIELD_MASK = 255 + BPF_F_STACK_BUILD_ID = 32 + BPF_F_SYSCTL_BASE_NAME = 1 + BPF_F_TIMER_ABS = 1 + BPF_F_TIMER_CPU_PIN = 2 + BPF_F_TOKEN_FD = 65536 + BPF_F_TUNINFO_FLAGS = 16 + BPF_F_TUNINFO_IPV6 = 1 + BPF_F_UPROBE_MULTI_RETURN = 1 + BPF_F_USER_BUILD_ID = 2048 + BPF_F_USER_STACK = 256 + BPF_F_VTYPE_BTF_OBJ_FD = 32768 + 
BPF_F_WRONLY = 16 + BPF_F_WRONLY_PROG = 256 + BPF_F_ZERO_CSUM_TX = 2 + BPF_F_ZERO_SEED = 64 + BPF_LOAD_HDR_OPT_TCP_SYN = 1 + BPF_LOCAL_STORAGE_GET_F_CREATE = 1 + BPF_MAX_LOOPS = 8388608 + BPF_MAX_TRAMP_LINKS = 38 + BPF_NOEXIST = 1 + BPF_RB_AVAIL_DATA = 0 + BPF_RB_CONS_POS = 2 + BPF_RB_FORCE_WAKEUP = 2 + BPF_RB_NO_WAKEUP = 1 + BPF_RB_PROD_POS = 3 + BPF_RB_RING_SIZE = 1 + BPF_REG_0 = 0 + BPF_REG_1 = 1 + BPF_REG_10 = 10 + BPF_REG_2 = 2 + BPF_REG_3 = 3 + BPF_REG_4 = 4 + BPF_REG_5 = 5 + BPF_REG_6 = 6 + BPF_REG_7 = 7 + BPF_REG_8 = 8 + BPF_REG_9 = 9 + BPF_RINGBUF_BUSY_BIT = 2147483648 + BPF_RINGBUF_DISCARD_BIT = 1073741824 + BPF_RINGBUF_HDR_SZ = 8 + BPF_SKB_CLOCK_MONOTONIC = 1 + BPF_SKB_CLOCK_REALTIME = 0 + BPF_SKB_CLOCK_TAI = 2 + BPF_SKB_TSTAMP_DELIVERY_MONO = 1 + BPF_SKB_TSTAMP_UNSPEC = 0 + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2 + BPF_SK_LOOKUP_F_REPLACE = 1 + BPF_SK_STORAGE_GET_F_CREATE = 1 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4 + BPF_SOCK_OPS_ALL_CB_FLAGS = 127 + BPF_SOCK_OPS_BASE_RTT = 7 + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14 + BPF_SOCK_OPS_NEEDS_ECN = 6 + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16 + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13 + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5 + BPF_SOCK_OPS_RETRANS_CB = 9 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2 + BPF_SOCK_OPS_RTO_CB = 8 + BPF_SOCK_OPS_RTO_CB_FLAG = 1 + BPF_SOCK_OPS_RTT_CB = 12 + BPF_SOCK_OPS_RTT_CB_FLAG = 8 + BPF_SOCK_OPS_RWND_INIT = 2 + BPF_SOCK_OPS_STATE_CB = 10 + BPF_SOCK_OPS_STATE_CB_FLAG = 4 + BPF_SOCK_OPS_TCP_CONNECT_CB = 3 + BPF_SOCK_OPS_TCP_LISTEN_CB = 11 + BPF_SOCK_OPS_TIMEOUT_INIT = 1 + BPF_SOCK_OPS_VOID = 0 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64 + BPF_TASK_ITER_ALL_PROCS = 0 + BPF_TASK_ITER_ALL_THREADS = 1 + BPF_TASK_ITER_PROC_THREADS = 2 + BPF_TCP_BOUND_INACTIVE = 13 + BPF_TCP_CLOSE = 7 + BPF_TCP_CLOSE_WAIT = 8 + BPF_TCP_CLOSING = 11 + BPF_TCP_ESTABLISHED = 1 + BPF_TCP_FIN_WAIT1 = 4 + BPF_TCP_FIN_WAIT2 = 5 + BPF_TCP_LAST_ACK = 9 + BPF_TCP_LISTEN = 10 + BPF_TCP_MAX_STATES = 14 + BPF_TCP_NEW_SYN_RECV = 12 + BPF_TCP_SYN_RECV = 3 + BPF_TCP_SYN_SENT = 2 + BPF_TCP_TIME_WAIT = 6 + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1 + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2 + BPF_XFRM_STATE_OPTS_SZ = 36 +) + type AdjRoomMode uint32 const ( @@ -72,7 +243,8 @@ const ( BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53 BPF_NETKIT_PRIMARY AttachType = 54 BPF_NETKIT_PEER AttachType = 55 - __MAX_BPF_ATTACH_TYPE AttachType = 56 + BPF_TRACE_KPROBE_SESSION AttachType = 56 + __MAX_BPF_ATTACH_TYPE AttachType = 57 ) type Cmd uint32 @@ -115,6 +287,8 @@ const ( BPF_ITER_CREATE Cmd = 33 BPF_LINK_DETACH Cmd = 34 BPF_PROG_BIND_MAP Cmd = 35 + BPF_TOKEN_CREATE Cmd = 36 + __MAX_BPF_CMD Cmd = 37 ) type FunctionId uint32 @@ -359,7 +533,8 @@ const ( BPF_LINK_TYPE_TCX LinkType = 11 BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12 BPF_LINK_TYPE_NETKIT LinkType = 13 - __MAX_BPF_LINK_TYPE LinkType = 14 + BPF_LINK_TYPE_SOCKMAP LinkType = 14 + __MAX_BPF_LINK_TYPE LinkType = 15 ) type MapType uint32 @@ -400,6 +575,17 @@ const ( BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 BPF_MAP_TYPE_USER_RINGBUF MapType = 31 BPF_MAP_TYPE_CGRP_STORAGE MapType = 32 + BPF_MAP_TYPE_ARENA MapType = 33 + __MAX_BPF_MAP_TYPE MapType = 34 +) + +type ObjType uint32 + +const ( + BPF_TYPE_UNSPEC ObjType = 0 + BPF_TYPE_PROG ObjType = 1 + BPF_TYPE_MAP ObjType = 2 + BPF_TYPE_LINK ObjType = 3 ) type PerfEventType uint32 @@ -450,6 +636,7 @@ const ( BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 BPF_PROG_TYPE_SYSCALL ProgType = 31 BPF_PROG_TYPE_NETFILTER ProgType = 32 + 
__MAX_BPF_PROG_TYPE ProgType = 33 ) type RetCode uint32 @@ -503,20 +690,23 @@ const ( ) type BtfInfo struct { - Btf Pointer + _ structs.HostLayout + Btf TypedPointer[uint8] BtfSize uint32 Id BTFID - Name Pointer + Name TypedPointer[uint8] NameLen uint32 KernelBtf uint32 } type FuncInfo struct { + _ structs.HostLayout InsnOff uint32 TypeId uint32 } type LineInfo struct { + _ structs.HostLayout InsnOff uint32 FileNameOff uint32 LineOff uint32 @@ -524,6 +714,7 @@ type LineInfo struct { } type LinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -532,12 +723,13 @@ type LinkInfo struct { } type MapInfo struct { + _ structs.HostLayout Type uint32 - Id uint32 + Id MapID KeySize uint32 ValueSize uint32 MaxEntries uint32 - MapFlags MapFlags + MapFlags uint32 Name ObjName Ifindex uint32 BtfVmlinuxValueTypeId TypeID @@ -546,22 +738,23 @@ type MapInfo struct { BtfId uint32 BtfKeyTypeId TypeID BtfValueTypeId TypeID - _ [4]byte + BtfVmlinuxId uint32 MapExtra uint64 } type ProgInfo struct { + _ structs.HostLayout Type uint32 Id uint32 Tag [8]uint8 JitedProgLen uint32 XlatedProgLen uint32 - JitedProgInsns uint64 - XlatedProgInsns Pointer + JitedProgInsns TypedPointer[uint8] + XlatedProgInsns TypedPointer[uint8] LoadTime uint64 CreatedByUid uint32 NrMapIds uint32 - MapIds Pointer + MapIds TypedPointer[MapID] Name ObjName Ifindex uint32 _ [4]byte /* unsupported bitfield */ @@ -569,15 +762,15 @@ type ProgInfo struct { NetnsIno uint64 NrJitedKsyms uint32 NrJitedFuncLens uint32 - JitedKsyms uint64 - JitedFuncLens uint64 + JitedKsyms TypedPointer[uint64] + JitedFuncLens TypedPointer[uint32] BtfId BTFID FuncInfoRecSize uint32 - FuncInfo Pointer + FuncInfo TypedPointer[uint8] NrFuncInfo uint32 NrLineInfo uint32 - LineInfo Pointer - JitedLineInfo uint64 + LineInfo TypedPointer[uint8] + JitedLineInfo TypedPointer[uint64] NrJitedLineInfo uint32 LineInfoRecSize uint32 JitedLineInfoRecSize uint32 @@ -593,6 +786,7 @@ type ProgInfo struct { } type SkLookup struct { + _ structs.HostLayout Cookie uint64 Family uint32 Protocol uint32 @@ -608,6 +802,7 @@ type SkLookup struct { } type XdpMd struct { + _ structs.HostLayout Data uint32 DataEnd uint32 DataMeta uint32 @@ -616,7 +811,10 @@ type XdpMd struct { EgressIfindex uint32 } -type BtfGetFdByIdAttr struct{ Id uint32 } +type BtfGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -627,6 +825,7 @@ func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { } type BtfGetNextIdAttr struct { + _ structs.HostLayout Id BTFID NextId BTFID } @@ -637,12 +836,15 @@ func BtfGetNextId(attr *BtfGetNextIdAttr) error { } type BtfLoadAttr struct { - Btf Pointer - BtfLogBuf Pointer + _ structs.HostLayout + Btf TypedPointer[uint8] + BtfLogBuf TypedPointer[uint8] BtfSize uint32 BtfLogSize uint32 BtfLogLevel uint32 BtfLogTrueSize uint32 + BtfFlags uint32 + BtfTokenFd int32 } func BtfLoad(attr *BtfLoadAttr) (*FD, error) { @@ -653,7 +855,10 @@ func BtfLoad(attr *BtfLoadAttr) (*FD, error) { return NewFD(int(fd)) } -type EnableStatsAttr struct{ Type uint32 } +type EnableStatsAttr struct { + _ structs.HostLayout + Type uint32 +} func EnableStats(attr *EnableStatsAttr) (*FD, error) { fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -664,6 +869,7 @@ func EnableStats(attr *EnableStatsAttr) (*FD, error) { } type IterCreateAttr struct { + _ structs.HostLayout LinkFd uint32 Flags uint32 } @@ -677,6 
+883,7 @@ func IterCreate(attr *IterCreateAttr) (*FD, error) { } type LinkCreateAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType @@ -694,6 +901,7 @@ func LinkCreate(attr *LinkCreateAttr) (*FD, error) { } type LinkCreateIterAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType @@ -712,15 +920,16 @@ func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { } type LinkCreateKprobeMultiAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType Flags uint32 KprobeMultiFlags uint32 Count uint32 - Syms Pointer - Addrs Pointer - Cookies Pointer + Syms StringSlicePointer + Addrs TypedPointer[uintptr] + Cookies TypedPointer[uint64] _ [16]byte } @@ -733,6 +942,7 @@ func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) { } type LinkCreateNetfilterAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType @@ -753,6 +963,7 @@ func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) { } type LinkCreateNetkitAttr struct { + _ structs.HostLayout ProgFd uint32 TargetIfindex uint32 AttachType AttachType @@ -772,6 +983,7 @@ func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) { } type LinkCreatePerfEventAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType @@ -789,6 +1001,7 @@ func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { } type LinkCreateTcxAttr struct { + _ structs.HostLayout ProgFd uint32 TargetIfindex uint32 AttachType AttachType @@ -808,6 +1021,7 @@ func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) { } type LinkCreateTracingAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType @@ -827,14 +1041,15 @@ func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) { } type LinkCreateUprobeMultiAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType Flags uint32 - Path Pointer - Offsets Pointer - RefCtrOffsets Pointer - Cookies Pointer + Path StringPointer + Offsets TypedPointer[uint64] + RefCtrOffsets TypedPointer[uint64] + Cookies TypedPointer[uint64] Count uint32 UprobeMultiFlags uint32 Pid uint32 @@ -849,7 +1064,10 @@ func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) { return NewFD(int(fd)) } -type LinkGetFdByIdAttr struct{ Id LinkID } +type LinkGetFdByIdAttr struct { + _ structs.HostLayout + Id LinkID +} func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) { fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -860,6 +1078,7 @@ func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) { } type LinkGetNextIdAttr struct { + _ structs.HostLayout Id LinkID NextId LinkID } @@ -870,6 +1089,7 @@ func LinkGetNextId(attr *LinkGetNextIdAttr) error { } type LinkUpdateAttr struct { + _ structs.HostLayout LinkFd uint32 NewProgFd uint32 Flags uint32 @@ -882,11 +1102,12 @@ func LinkUpdate(attr *LinkUpdateAttr) error { } type MapCreateAttr struct { + _ structs.HostLayout MapType MapType KeySize uint32 ValueSize uint32 MaxEntries uint32 - MapFlags MapFlags + MapFlags uint32 InnerMapFd uint32 NumaNode uint32 MapName ObjName @@ -896,6 +1117,8 @@ type MapCreateAttr struct { BtfValueTypeId TypeID BtfVmlinuxValueTypeId TypeID MapExtra uint64 + ValueTypeBtfObjFd int32 + MapTokenFd int32 } func MapCreate(attr *MapCreateAttr) (*FD, error) { @@ -907,6 +1130,7 @@ func MapCreate(attr *MapCreateAttr) (*FD, error) { } type MapDeleteBatchAttr struct { + _ 
structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -923,6 +1147,7 @@ func MapDeleteBatch(attr *MapDeleteBatchAttr) error { } type MapDeleteElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -935,14 +1160,20 @@ func MapDeleteElem(attr *MapDeleteElemAttr) error { return err } -type MapFreezeAttr struct{ MapFd uint32 } +type MapFreezeAttr struct { + _ structs.HostLayout + MapFd uint32 +} func MapFreeze(attr *MapFreezeAttr) error { _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) return err } -type MapGetFdByIdAttr struct{ Id uint32 } +type MapGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -953,6 +1184,7 @@ func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { } type MapGetNextIdAttr struct { + _ structs.HostLayout Id uint32 NextId uint32 } @@ -963,6 +1195,7 @@ func MapGetNextId(attr *MapGetNextIdAttr) error { } type MapGetNextKeyAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -975,6 +1208,7 @@ func MapGetNextKey(attr *MapGetNextKeyAttr) error { } type MapLookupAndDeleteBatchAttr struct { + _ structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -991,6 +1225,7 @@ func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { } type MapLookupAndDeleteElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -1004,6 +1239,7 @@ func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { } type MapLookupBatchAttr struct { + _ structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -1020,6 +1256,7 @@ func MapLookupBatch(attr *MapLookupBatchAttr) error { } type MapLookupElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -1033,6 +1270,7 @@ func MapLookupElem(attr *MapLookupElemAttr) error { } type MapUpdateBatchAttr struct { + _ structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -1049,6 +1287,7 @@ func MapUpdateBatch(attr *MapUpdateBatchAttr) error { } type MapUpdateElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -1062,7 +1301,8 @@ func MapUpdateElem(attr *MapUpdateElemAttr) error { } type ObjGetAttr struct { - Pathname Pointer + _ structs.HostLayout + Pathname StringPointer BpfFd uint32 FileFlags uint32 PathFd int32 @@ -1078,6 +1318,7 @@ func ObjGet(attr *ObjGetAttr) (*FD, error) { } type ObjGetInfoByFdAttr struct { + _ structs.HostLayout BpfFd uint32 InfoLen uint32 Info Pointer @@ -1089,7 +1330,8 @@ func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { } type ObjPinAttr struct { - Pathname Pointer + _ structs.HostLayout + Pathname StringPointer BpfFd uint32 FileFlags uint32 PathFd int32 @@ -1102,6 +1344,7 @@ func ObjPin(attr *ObjPinAttr) error { } type ProgAttachAttr struct { + _ structs.HostLayout TargetFdOrIfindex uint32 AttachBpfFd uint32 AttachType uint32 @@ -1117,6 +1360,7 @@ func ProgAttach(attr *ProgAttachAttr) error { } type ProgBindMapAttr struct { + _ structs.HostLayout ProgFd uint32 MapFd uint32 Flags uint32 @@ -1128,6 +1372,7 @@ func ProgBindMap(attr *ProgBindMapAttr) error { } type ProgDetachAttr struct { + _ structs.HostLayout TargetFdOrIfindex uint32 AttachBpfFd uint32 AttachType uint32 @@ -1142,7 +1387,10 @@ func ProgDetach(attr *ProgDetachAttr) error { return err } -type ProgGetFdByIdAttr struct{ Id uint32 } +type ProgGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} func 
ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -1153,6 +1401,7 @@ func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { } type ProgGetNextIdAttr struct { + _ structs.HostLayout Id uint32 NextId uint32 } @@ -1163,13 +1412,14 @@ func ProgGetNextId(attr *ProgGetNextIdAttr) error { } type ProgLoadAttr struct { + _ structs.HostLayout ProgType ProgType InsnCnt uint32 - Insns Pointer - License Pointer + Insns TypedPointer[uint8] + License StringPointer LogLevel LogLevel LogSize uint32 - LogBuf Pointer + LogBuf TypedPointer[uint8] KernVersion uint32 ProgFlags uint32 ProgName ObjName @@ -1177,18 +1427,20 @@ type ProgLoadAttr struct { ExpectedAttachType AttachType ProgBtfFd uint32 FuncInfoRecSize uint32 - FuncInfo Pointer + FuncInfo TypedPointer[uint8] FuncInfoCnt uint32 LineInfoRecSize uint32 - LineInfo Pointer + LineInfo TypedPointer[uint8] LineInfoCnt uint32 AttachBtfId TypeID AttachBtfObjFd uint32 CoreReloCnt uint32 - FdArray Pointer - CoreRelos Pointer + FdArray TypedPointer[int32] + CoreRelos TypedPointer[uint8] CoreReloRecSize uint32 LogTrueSize uint32 + ProgTokenFd int32 + _ [4]byte } func ProgLoad(attr *ProgLoadAttr) (*FD, error) { @@ -1200,16 +1452,17 @@ func ProgLoad(attr *ProgLoadAttr) (*FD, error) { } type ProgQueryAttr struct { + _ structs.HostLayout TargetFdOrIfindex uint32 AttachType AttachType QueryFlags uint32 AttachFlags uint32 - ProgIds Pointer + ProgIds TypedPointer[ProgramID] Count uint32 _ [4]byte - ProgAttachFlags Pointer - LinkIds Pointer - LinkAttachFlags Pointer + ProgAttachFlags TypedPointer[ProgramID] + LinkIds TypedPointer[LinkID] + LinkAttachFlags TypedPointer[LinkID] Revision uint64 } @@ -1219,18 +1472,19 @@ func ProgQuery(attr *ProgQueryAttr) error { } type ProgRunAttr struct { + _ structs.HostLayout ProgFd uint32 Retval uint32 DataSizeIn uint32 DataSizeOut uint32 - DataIn Pointer - DataOut Pointer + DataIn TypedPointer[uint8] + DataOut TypedPointer[uint8] Repeat uint32 Duration uint32 CtxSizeIn uint32 CtxSizeOut uint32 - CtxIn Pointer - CtxOut Pointer + CtxIn TypedPointer[uint8] + CtxOut TypedPointer[uint8] Flags uint32 Cpu uint32 BatchSize uint32 @@ -1243,9 +1497,11 @@ func ProgRun(attr *ProgRunAttr) error { } type RawTracepointOpenAttr struct { - Name Pointer + _ structs.HostLayout + Name StringPointer ProgFd uint32 _ [4]byte + Cookie uint64 } func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { @@ -1257,6 +1513,7 @@ func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { } type CgroupLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -1267,42 +1524,47 @@ type CgroupLinkInfo struct { } type IterLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 _ [4]byte - TargetName Pointer + TargetName TypedPointer[uint8] TargetNameLen uint32 } type KprobeLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 _ [4]byte PerfEventType PerfEventType _ [4]byte - FuncName Pointer + FuncName TypedPointer[uint8] NameLen uint32 Offset uint32 Addr uint64 Missed uint64 - _ [8]byte + Cookie uint64 } type KprobeMultiLinkInfo struct { - Type LinkType - Id LinkID - ProgId uint32 - _ [4]byte - Addrs Pointer - Count uint32 - Flags uint32 - Missed uint64 - _ [24]byte + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Addrs TypedPointer[uint64] + Count uint32 + Flags uint32 + Missed uint64 + Cookies TypedPointer[uint64] + _ [16]byte } type NetNsLinkInfo struct { + _ 
structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -1313,6 +1575,7 @@ type NetNsLinkInfo struct { } type NetfilterLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -1325,6 +1588,7 @@ type NetfilterLinkInfo struct { } type NetkitLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -1335,6 +1599,7 @@ type NetkitLinkInfo struct { } type PerfEventLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -1343,16 +1608,18 @@ type PerfEventLinkInfo struct { } type RawTracepointLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 _ [4]byte - TpName Pointer + TpName TypedPointer[uint8] TpNameLen uint32 _ [36]byte } type TcxLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -1363,6 +1630,7 @@ type TcxLinkInfo struct { } type TracingLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 @@ -1374,6 +1642,7 @@ type TracingLinkInfo struct { } type XDPLinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go index d184ea196aeb..62e483a1c412 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go @@ -51,19 +51,19 @@ func SyscallOutput(dst any, size int) Buffer { // // Returns the number of copied bytes. func (b Buffer) CopyTo(dst []byte) int { - return copy(dst, b.unsafeBytes()) + return copy(dst, b.Bytes()) } // AppendTo appends the buffer onto dst. func (b Buffer) AppendTo(dst []byte) []byte { - return append(dst, b.unsafeBytes()...) + return append(dst, b.Bytes()...) } // Pointer returns the location where a syscall should write. func (b Buffer) Pointer() sys.Pointer { // NB: This deliberately ignores b.length to support zero-copy // marshaling / unmarshaling using unsafe.Pointer. - return sys.NewPointer(b.ptr) + return sys.UnsafePointer(b.ptr) } // Unmarshal the buffer into the provided value. @@ -72,10 +72,12 @@ func (b Buffer) Unmarshal(data any) error { return nil } - return Unmarshal(data, b.unsafeBytes()) + return Unmarshal(data, b.Bytes()) } -func (b Buffer) unsafeBytes() []byte { +// Bytes returns the buffer as a byte slice. Returns nil if the Buffer was +// created using UnsafeBuffer or by zero-copy unmarshaling. 
+func (b Buffer) Bytes() []byte { if b.size == syscallPointerOnly { return nil } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go index 0026af8f24fb..3f7deb80f197 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go @@ -1,14 +1,12 @@ package sysenc import ( - "bytes" "encoding" "encoding/binary" "errors" "fmt" "reflect" "slices" - "sync" "unsafe" "github.com/cilium/ebpf/internal" @@ -53,11 +51,7 @@ func Marshal(data any, size int) (Buffer, error) { return newBuffer(buf), nil } - wr := internal.NewBuffer(make([]byte, 0, size)) - defer internal.PutBuffer(wr) - - err = binary.Write(wr, internal.NativeEndian, value) - buf = wr.Bytes() + buf, err = binary.Append(nil, internal.NativeEndian, value) } if err != nil { return Buffer{}, err @@ -70,16 +64,10 @@ func Marshal(data any, size int) (Buffer, error) { return newBuffer(buf), nil } -var bytesReaderPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Reader) - }, -} - // Unmarshal a byte slice in the system's native endianness into data. // // Returns an error if buf can't be unmarshalled according to the behaviour -// of [binary.Read]. +// of [binary.Decode]. func Unmarshal(data interface{}, buf []byte) error { switch value := data.(type) { case encoding.BinaryUnmarshaler: @@ -100,16 +88,12 @@ func Unmarshal(data interface{}, buf []byte) error { return nil } - rd := bytesReaderPool.Get().(*bytes.Reader) - defer bytesReaderPool.Put(rd) - - rd.Reset(buf) - - if err := binary.Read(rd, internal.NativeEndian, value); err != nil { + n, err := binary.Decode(buf, internal.NativeEndian, value) + if err != nil { return err } - if rd.Len() != 0 { + if n != len(buf) { return fmt.Errorf("unmarshaling %T doesn't consume all data", data) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go new file mode 100644 index 000000000000..c47acf89cede --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go @@ -0,0 +1,103 @@ +package testmain + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sync" + "sync/atomic" +) + +// foundLeak is atomic since the GC may collect objects in parallel. +var foundLeak atomic.Bool + +func onLeakFD(fs *runtime.Frames) { + foundLeak.Store(true) + fmt.Fprintln(os.Stderr, "leaked fd created at:") + fmt.Fprintln(os.Stderr, formatFrames(fs)) +} + +// fds is a registry of all file descriptors wrapped into sys.fds that were +// created while an fd tracer was active. +var fds *sync.Map // map[int]*runtime.Frames + +// TraceFD associates raw with the current execution stack. +// +// skip controls how many entries of the stack the function should skip. +func TraceFD(raw int, skip int) { + if fds == nil { + return + } + + // Attempt to store the caller's stack for the given fd value. + // Panic if fds contains an existing stack for the fd. + old, exist := fds.LoadOrStore(raw, callersFrames(skip)) + if exist { + f := old.(*runtime.Frames) + panic(fmt.Sprintf("found existing stack for fd %d:\n%s", raw, formatFrames(f))) + } +} + +// ForgetFD removes any existing association for raw. 
+func ForgetFD(raw int) { + if fds != nil { + fds.Delete(raw) + } +} + +// LeakFD indicates that raw was leaked. +// +// Calling the function with a value that was not passed to [TraceFD] before +// is undefined. +func LeakFD(raw int) { + if fds == nil { + return + } + + // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback + // is invoked at most once for one sys.FD allocation, runtime.Frames can only + // be unwound once. + f, ok := fds.LoadAndDelete(raw) + if ok { + onLeakFD(f.(*runtime.Frames)) + } +} + +// flushFrames removes all elements from fds and returns them as a slice. This +// deals with the fact that a runtime.Frames can only be unwound once using +// Next(). +func flushFrames() []*runtime.Frames { + var frames []*runtime.Frames + fds.Range(func(key, value any) bool { + frames = append(frames, value.(*runtime.Frames)) + fds.Delete(key) + return true + }) + return frames +} + +func callersFrames(skip int) *runtime.Frames { + c := make([]uintptr, 32) + + // Skip runtime.Callers and this function. + i := runtime.Callers(skip+2, c) + if i == 0 { + return nil + } + + return runtime.CallersFrames(c) +} + +// formatFrames formats a runtime.Frames as a human-readable string. +func formatFrames(fs *runtime.Frames) string { + var b bytes.Buffer + for { + f, more := fs.Next() + b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line)) + if !more { + break + } + } + return b.String() +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go new file mode 100644 index 000000000000..53de97c86367 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go @@ -0,0 +1,58 @@ +package testmain + +import ( + "flag" + "fmt" + "os" + "sync" + + "github.com/cilium/ebpf/internal/platform" +) + +type testingM interface { + Run() int +} + +// Run m with various debug aids enabled. +// +// The function calls [os.Exit] and does not return. 
+func Run(m testingM) { + const traceLogFlag = "trace-log" + + var ts *traceSession + if platform.IsWindows { + traceLog := flag.Bool(traceLogFlag, false, "Output a trace of eBPF runtime activity") + flag.Parse() + + if *traceLog { + var err error + ts, err = newTraceSession() + if err != nil { + fmt.Fprintln(os.Stderr, "Disabling trace logging:", err) + } + } + } + defer ts.Close() + + fds = new(sync.Map) + ret := m.Run() + + for _, f := range flushFrames() { + onLeakFD(f) + } + + if foundLeak.Load() { + ret = 99 + } + + if err := ts.Dump(os.Stderr); err != nil { + fmt.Fprintln(os.Stderr, "Error while dumping trace log:", err) + ret = 99 + } + + if platform.IsWindows && ret != 0 && ts == nil { + fmt.Fprintf(os.Stderr, "Consider enabling trace logging with -%s\n", traceLogFlag) + } + + os.Exit(ret) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go new file mode 100644 index 000000000000..533af9dbb289 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go @@ -0,0 +1,219 @@ +package testmain + +import ( + "encoding/xml" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "slices" + "strconv" + "strings" + "text/tabwriter" +) + +type tracelogKeywords uint64 + +// Know tracelog keywords. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/libs/shared/ebpf_tracelog.h +var allKeywords = []string{ + "entry-exit", + "base", + "error", + "epoch", + "core", + "link", + "map", + "program", + "api", + "printk", + "native", +} + +func (kw *tracelogKeywords) UnmarshalText(text []byte) error { + decoded, err := strconv.ParseUint(string(text), 0, 64) + if err != nil { + return fmt.Errorf("foo: %w", err) + } + *kw = tracelogKeywords(decoded) + return nil +} + +func (kw tracelogKeywords) decode() []string { + var keywords []string + for _, keyword := range allKeywords { + if kw&1 > 0 { + keywords = append(keywords, keyword) + } + kw >>= 1 + } + if kw > 0 { + keywords = append(keywords, fmt.Sprintf("0x%x", kw)) + } + return keywords +} + +type traceSession struct { + session string +} + +// newTraceSession starts a trace log for eBPF for Windows related events. 
+// +// * https://github.com/microsoft/ebpf-for-windows/blob/main/docs/GettingStarted.md#using-tracing +// * https://devblogs.microsoft.com/performance-diagnostics/controlling-the-event-session-name-with-the-instance-name/ and +func newTraceSession() (*traceSession, error) { + def := filepath.Join(os.Getenv("ProgramFiles"), "ebpf-for-windows\\ebpfforwindows.wprp") + if _, err := os.Stat(def); err != nil { + return nil, err + } + + session := fmt.Sprintf("epbf-go-%d", os.Getpid()) + wpr := exec.Command("wpr.exe", "-start", def, "-filemode", "-instancename", session) + wpr.Stderr = os.Stderr + if err := wpr.Run(); err != nil { + return nil, err + } + + return &traceSession{session}, nil +} + +func (ts *traceSession) Close() error { + if ts == nil { + return nil + } + + return ts.stop(os.DevNull) +} + +func (ts *traceSession) stop(file string) error { + if ts.session == "" { + return nil + } + + wpr := exec.Command("wpr.exe", "-stop", file, "-instancename", ts.session) + if err := wpr.Run(); err != nil { + return err + } + + ts.session = "" + return nil +} + +func (ts *traceSession) Dump(w io.Writer) error { + if ts == nil { + return nil + } + + path, err := os.MkdirTemp("", "ebpf-go-trace") + if err != nil { + return err + } + defer os.RemoveAll(path) + + trace := filepath.Join(path, "trace.etl") + if err := ts.stop(trace); err != nil { + return fmt.Errorf("write trace: %w", err) + } + + netsh := exec.Command("netsh.exe", "trace", "convert", trace, "dump=XML") + if err := netsh.Run(); err != nil { + return err + } + + f, err := os.Open(filepath.Join(path, "trace.xml")) + if err != nil { + return err + } + defer f.Close() + + return summariseWPRTrace(f, w) +} + +func summariseWPRTrace(r io.Reader, w io.Writer) error { + type nameValue struct { + Name string `xml:"Name,attr"` + Value string `xml:",chardata"` + } + + type event struct { + XMLName xml.Name `xml:"Event"` + System struct { + Provider struct { + Name string `xml:"Name,attr"` + } `xml:"Provider"` + TimeCreated struct { + SystemTime string `xml:"SystemTime,attr"` + } `xml:"TimeCreated"` + Keywords tracelogKeywords `xml:"Keywords"` + Level uint64 `xml:"Level"` + } `xml:"System"` + EventData struct { + Data []nameValue `xml:"Data"` + } `xml:"EventData"` + RenderingInfo struct { + Task string `xml:"Task"` + } `xml:"RenderingInfo"` + } + + var events struct { + Events []event `xml:"Event"` + } + + err := xml.NewDecoder(r).Decode(&events) + if err != nil { + return fmt.Errorf("unmarshal trace XML: %w", err) + } + + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + for _, event := range events.Events { + if !strings.Contains(event.System.Provider.Name, "Ebpf") { + continue + } + + flag := " " + // See https://learn.microsoft.com/en-us/windows/win32/api/traceloggingprovider/nf-traceloggingprovider-tracelogginglevel#remarks + if event.System.Level > 0 && event.System.Level <= 3 { + flag = "!" + } + + kw := event.System.Keywords.decode() + fmt.Fprintf(tw, "%s\t%s\t", flag, strings.Join(kw, ",")) + + data := event.EventData.Data + slices.SortFunc(data, func(a, b nameValue) int { + return strings.Compare(a.Name, b.Name) + }) + + var first string + for _, name := range []string{ + "Entry", + "Message", + "ErrorMessage", + } { + i := slices.IndexFunc(data, func(kv nameValue) bool { + return kv.Name == name + }) + + if i == -1 { + continue + } + + first = data[i].Value + data = slices.Delete(data, i, i+1) + break + } + + // NB: This may be empty. 
+ fmt.Fprintf(tw, "%s\t", first) + + for _, data := range data { + fmt.Fprintf(tw, "%s=%s\t", data.Name, data.Value) + } + + fmt.Fprintln(tw) + } + + return tw.Flush() +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go index 897740fec0cb..7f7b3cba51fd 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go @@ -12,6 +12,8 @@ import ( "syscall" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/unix" ) @@ -112,6 +114,10 @@ func sanitizeTracefsPath(path ...string) (string, error) { // but may be also be available at /sys/kernel/debug/tracing if debugfs is mounted. // The available tracefs paths will depends on distribution choices. var getTracefsPath = sync.OnceValues(func() (string, error) { + if !platform.IsLinux { + return "", fmt.Errorf("tracefs: %w", internal.ErrNotSupportedOnOS) + } + for _, p := range []struct { path string fsType int64 @@ -121,7 +127,7 @@ var getTracefsPath = sync.OnceValues(func() (string, error) { // RHEL/CentOS {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC}, } { - if fsType, err := internal.FSType(p.path); err == nil && fsType == p.fsType { + if fsType, err := linux.FSType(p.path); err == nil && fsType == p.fsType { return p.path, nil } } @@ -213,7 +219,10 @@ func NewEvent(args ProbeArgs) (*Event, error) { if err == nil { return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist) } - if err != nil && !errors.Is(err, os.ErrNotExist) { + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("trace event %s/%s: %w (unknown symbol?)", args.Group, eventName, err) + } + if !errors.Is(err, os.ErrNotExist) { return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err) } @@ -297,7 +306,11 @@ func NewEvent(args ProbeArgs) (*Event, error) { if err := removeEvent(args.Type, event); err != nil { return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err) } - return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported) + + return nil, &internal.UnsupportedFeatureError{ + MinimumVersion: internal.Version{4, 12}, + Name: "trace event with non-default maxactive", + } } if err != nil { return nil, fmt.Errorf("get trace event id: %w", err) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go new file mode 100644 index 000000000000..0c4886bd13a7 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go @@ -0,0 +1,29 @@ +package unix + +import ( + "syscall" + + linux "golang.org/x/sys/unix" +) + +type Errno = syscall.Errno + +const ( + E2BIG = linux.E2BIG + EACCES = linux.EACCES + EAGAIN = linux.EAGAIN + EBADF = linux.EBADF + EEXIST = linux.EEXIST + EFAULT = linux.EFAULT + EILSEQ = linux.EILSEQ + EINTR = linux.EINTR + EINVAL = linux.EINVAL + ENODEV = linux.ENODEV + ENOENT = linux.ENOENT + ENOSPC = linux.ENOSPC + EOPNOTSUPP = linux.EOPNOTSUPP + EPERM = linux.EPERM + EPOLLIN = linux.EPOLLIN + ESRCH = linux.ESRCH + ESTALE = linux.ESTALE +) diff --git 
a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go new file mode 100644 index 000000000000..fc2b042b5471 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go @@ -0,0 +1,29 @@ +//go:build !linux && !windows + +package unix + +import "syscall" + +type Errno = syscall.Errno + +// Errnos are distinct and non-zero. +const ( + E2BIG Errno = iota + 1 + EACCES + EAGAIN + EBADF + EEXIST + EFAULT + EILSEQ + EINTR + EINVAL + ENODEV + ENOENT + ENOSPC + ENOTSUP + ENOTSUPP + EOPNOTSUPP + EPERM + ESRCH + ESTALE +) diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go new file mode 100644 index 000000000000..6077e983f3f1 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go @@ -0,0 +1,59 @@ +// Code generated by "stringer -type=Errno -tags=windows -output=errno_string_windows.go"; DO NOT EDIT. + +package unix + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[EPERM-1] + _ = x[ENOENT-2] + _ = x[ESRCH-3] + _ = x[EINTR-4] + _ = x[E2BIG-7] + _ = x[EBADF-9] + _ = x[EAGAIN-11] + _ = x[EACCES-13] + _ = x[EFAULT-14] + _ = x[EEXIST-17] + _ = x[ENODEV-19] + _ = x[EINVAL-22] + _ = x[ENOSPC-28] + _ = x[EILSEQ-42] + _ = x[ENOTSUP-129] + _ = x[EOPNOTSUPP-130] + _ = x[ENOTSUPP-536870912] + _ = x[ESTALE-536870913] +} + +const _Errno_name = "EPERMENOENTESRCHEINTRE2BIGEBADFEAGAINEACCESEFAULTEEXISTENODEVEINVALENOSPCEILSEQENOTSUPEOPNOTSUPPENOTSUPPESTALE" + +var _Errno_map = map[Errno]string{ + 1: _Errno_name[0:5], + 2: _Errno_name[5:11], + 3: _Errno_name[11:16], + 4: _Errno_name[16:21], + 7: _Errno_name[21:26], + 9: _Errno_name[26:31], + 11: _Errno_name[31:37], + 13: _Errno_name[37:43], + 14: _Errno_name[43:49], + 17: _Errno_name[49:55], + 19: _Errno_name[55:61], + 22: _Errno_name[61:67], + 28: _Errno_name[67:73], + 42: _Errno_name[73:79], + 129: _Errno_name[79:86], + 130: _Errno_name[86:96], + 536870912: _Errno_name[96:104], + 536870913: _Errno_name[104:110], +} + +func (i Errno) String() string { + if str, ok := _Errno_map[i]; ok { + return str + } + return "Errno(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go new file mode 100644 index 000000000000..7500cd6d4ee1 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go @@ -0,0 +1,78 @@ +package unix + +// The code in this file is derived from syscall_unix.go in the Go source code, +// licensed under the MIT license. + +import ( + "errors" + "os" + "syscall" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type=Errno -tags=windows -output=errno_string_windows.go + +// Windows specific constants for Unix errnos. +// +// The values do not always match Linux, for example EILSEQ and EOPNOTSUPP. 
+// +// See https://learn.microsoft.com/en-us/cpp/c-runtime-library/errno-constants?view=msvc-170 +const ( + EPERM Errno = 1 + ENOENT Errno = 2 + ESRCH Errno = 3 + EINTR Errno = 4 + E2BIG Errno = 7 + EBADF Errno = 9 + EAGAIN Errno = 11 + EACCES Errno = 13 + EFAULT Errno = 14 + EEXIST Errno = 17 + ENODEV Errno = 19 + EINVAL Errno = 22 + ENFILE Errno = 23 + EMFILE Errno = 24 + ENOSPC Errno = 28 + ENOSYS Errno = 40 + ENOTEMPTY Errno = 41 + EILSEQ Errno = 42 + ENOTSUP Errno = 129 + EOPNOTSUPP Errno = 130 + ETIMEDOUT Errno = 138 + EWOULDBLOCK Errno = 140 +) + +// These constants do not exist on Windows and therefore have a non-zero +// dummy value. +const ( + ENOTSUPP Errno = Errno(syscall.APPLICATION_ERROR) + iota + ESTALE +) + +// Errno is a Windows compatibility shim for Unix errnos. +type Errno uintptr + +func (e Errno) Error() string { + return e.String() +} + +func (e Errno) Is(target error) bool { + switch target { + case os.ErrPermission: + return e == EACCES || e == EPERM + case os.ErrExist: + return e == EEXIST || e == ENOTEMPTY + case os.ErrNotExist: + return e == ENOENT + case errors.ErrUnsupported: + return e == ENOSYS || e == ENOTSUP || e == EOPNOTSUPP + } + return false +} + +func (e Errno) Temporary() bool { + return e == EINTR || e == EMFILE || e == ENFILE || e.Timeout() +} + +func (e Errno) Timeout() bool { + return e == EAGAIN || e == EWOULDBLOCK || e == ETIMEDOUT +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/error.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/error.go new file mode 100644 index 000000000000..48017c1009c5 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/error.go @@ -0,0 +1,23 @@ +package unix + +import ( + "fmt" + "runtime" + "strings" + + "github.com/cilium/ebpf/internal" +) + +// errNonLinux returns an error which wraps [internal.ErrNotSupportedOnOS] and +// includes the name of the calling function. 
+func errNonLinux() error { + name := "unknown" + pc, _, _, ok := runtime.Caller(1) + if ok { + name = runtime.FuncForPC(pc).Name() + if pos := strings.LastIndexByte(name, '.'); pos != -1 { + name = name[pos+1:] + } + } + return fmt.Errorf("unix: %s: %w", name, internal.ErrNotSupportedOnOS) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go new file mode 100644 index 000000000000..76f367aa855a --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go @@ -0,0 +1,15 @@ +//go:build !linux && !windows + +package unix + +func BytePtrFromString(s string) (*byte, error) { + return nil, errNonLinux() +} + +func ByteSliceToString(s []byte) string { + return "" +} + +func ByteSliceFromString(s string) ([]byte, error) { + return nil, errNonLinux() +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go new file mode 100644 index 000000000000..00af5a968633 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go @@ -0,0 +1,23 @@ +package unix + +import ( + "syscall" + + "golang.org/x/sys/windows" +) + +func BytePtrFromString(s string) (*byte, error) { + p, err := windows.BytePtrFromString(s) + if err == syscall.EINVAL { + err = EINVAL + } + return p, err +} + +func ByteSliceToString(s []byte) string { + return windows.ByteSliceToString(s) +} + +func ByteSliceFromString(s string) ([]byte, error) { + return windows.ByteSliceFromString(s) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index d725cfaa3941..14a0a192909b 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -4,30 +4,11 @@ package unix import ( "syscall" + "unsafe" linux "golang.org/x/sys/unix" ) -const ( - ENOENT = linux.ENOENT - EEXIST = linux.EEXIST - EAGAIN = linux.EAGAIN - ENOSPC = linux.ENOSPC - EINVAL = linux.EINVAL - EPOLLIN = linux.EPOLLIN - EINTR = linux.EINTR - EPERM = linux.EPERM - ESRCH = linux.ESRCH - ENODEV = linux.ENODEV - EBADF = linux.EBADF - E2BIG = linux.E2BIG - EFAULT = linux.EFAULT - EACCES = linux.EACCES - EILSEQ = linux.EILSEQ - EOPNOTSUPP = linux.EOPNOTSUPP - ESTALE = linux.ESTALE -) - const ( BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE @@ -58,6 +39,7 @@ const ( PROT_WRITE = linux.PROT_WRITE MAP_ANON = linux.MAP_ANON MAP_SHARED = linux.MAP_SHARED + MAP_FIXED = linux.MAP_FIXED MAP_PRIVATE = linux.MAP_PRIVATE PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE @@ -81,15 +63,16 @@ const ( SO_DETACH_BPF = linux.SO_DETACH_BPF SOL_SOCKET = linux.SOL_SOCKET SIGPROF = linux.SIGPROF + SIGUSR1 = linux.SIGUSR1 SIG_BLOCK = linux.SIG_BLOCK SIG_UNBLOCK = linux.SIG_UNBLOCK - EM_NONE = linux.EM_NONE - EM_BPF = linux.EM_BPF BPF_FS_MAGIC = linux.BPF_FS_MAGIC TRACEFS_MAGIC = linux.TRACEFS_MAGIC DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP + AF_UNSPEC = linux.AF_UNSPEC + IFF_UP = linux.IFF_UP ) type Statfs_t = 
linux.Statfs_t @@ -155,6 +138,11 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e return linux.Mmap(fd, offset, length, prot, flags) } +//go:nocheckptr +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + return linux.MmapPtr(fd, offset, addr, length, prot, flags) +} + func Munmap(b []byte) (err error) { return linux.Munmap(b) } @@ -187,6 +175,10 @@ func ByteSliceToString(s []byte) string { return linux.ByteSliceToString(s) } +func ByteSliceFromString(s string) ([]byte, error) { + return linux.ByteSliceFromString(s) +} + func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) } @@ -214,3 +206,7 @@ func SchedSetaffinity(pid int, set *CPUSet) error { func SchedGetaffinity(pid int, set *CPUSet) error { return linux.SchedGetaffinity(pid, set) } + +func Auxv() ([][2]uintptr, error) { + return linux.Auxv() +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index 3ff8962716a2..f3f764ebe68b 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -3,31 +3,8 @@ package unix import ( - "fmt" - "runtime" "syscall" -) - -var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) - -// Errnos are distinct and non-zero. -const ( - ENOENT syscall.Errno = iota + 1 - EEXIST - EAGAIN - ENOSPC - EINVAL - EINTR - EPERM - ESRCH - ENODEV - EBADF - E2BIG - EFAULT - EACCES - EILSEQ - EOPNOTSUPP - ESTALE + "unsafe" ) // Constants are distinct to avoid breaking switch statements. 
@@ -61,6 +38,7 @@ const ( PROT_WRITE MAP_ANON MAP_SHARED + MAP_FIXED MAP_PRIVATE PERF_ATTR_SIZE_VER1 PERF_TYPE_SOFTWARE @@ -84,16 +62,17 @@ const ( SO_DETACH_BPF SOL_SOCKET SIGPROF + SIGUSR1 SIG_BLOCK SIG_UNBLOCK - EM_NONE - EM_BPF BPF_FS_MAGIC TRACEFS_MAGIC DEBUGFS_MAGIC BPF_RB_NO_WAKEUP BPF_RB_FORCE_WAKEUP BPF_F_LOCK + AF_UNSPEC + IFF_UP ) type Statfs_t struct { @@ -136,28 +115,28 @@ type Sigset_t struct { Val [4]uint64 } -func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - return 0, 0, syscall.ENOTSUP +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { + return 0, 0, ENOTSUP } func PthreadSigmask(how int, set, oldset *Sigset_t) error { - return errNonLinux + return errNonLinux() } func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - return -1, errNonLinux + return -1, errNonLinux() } func IoctlSetInt(fd int, req uint, value int) error { - return errNonLinux + return errNonLinux() } func Statfs(path string, buf *Statfs_t) error { - return errNonLinux + return errNonLinux() } func Close(fd int) (err error) { - return errNonLinux + return errNonLinux() } type EpollEvent struct { @@ -167,23 +146,23 @@ type EpollEvent struct { } func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return errNonLinux + return errNonLinux() } func Eventfd(initval uint, flags int) (fd int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } func Write(fd int, p []byte) (n int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } func EpollCreate1(flag int) (fd int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } type PerfEventMmapPage struct { @@ -213,15 +192,19 @@ type PerfEventMmapPage struct { } func SetNonblock(fd int, nonblocking bool) (err error) { - return errNonLinux + return errNonLinux() } func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return []byte{}, errNonLinux + return []byte{}, errNonLinux() +} + +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + return nil, errNonLinux() } func Munmap(b []byte) (err error) { - return errNonLinux + return errNonLinux() } type PerfEventAttr struct { @@ -246,7 +229,7 @@ type PerfEventAttr struct { } func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } type Utsname struct { @@ -255,7 +238,7 @@ type Utsname struct { } func Uname(buf *Utsname) (err error) { - return errNonLinux + return errNonLinux() } func Getpid() int { @@ -267,35 +250,27 @@ func Gettid() int { } func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - return errNonLinux -} - -func BytePtrFromString(s string) (*byte, error) { - return nil, errNonLinux -} - -func ByteSliceToString(s []byte) string { - return "" + return errNonLinux() } func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { - return errNonLinux + return errNonLinux() } func Prlimit(pid, resource int, new, old *Rlimit) error { - return errNonLinux + return errNonLinux() } func Open(path string, mode int, perm uint32) (int, error) { - return -1, errNonLinux + return -1, errNonLinux() } func Fstat(fd int, stat *Stat_t) error { - return errNonLinux + return errNonLinux() } func SetsockoptInt(fd, level, opt, value int) error { - 
return errNonLinux + return errNonLinux() } type CPUSet struct{} @@ -303,9 +278,13 @@ type CPUSet struct{} func (*CPUSet) Set(int) {} func SchedSetaffinity(pid int, set *CPUSet) error { - return errNonLinux + return errNonLinux() } func SchedGetaffinity(pid int, set *CPUSet) error { - return errNonLinux + return errNonLinux() +} + +func Auxv() ([][2]uintptr, error) { + return nil, errNonLinux() } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/internal/version.go b/src/runtime/vendor/github.com/cilium/ebpf/internal/version.go index acd4650af732..3123dc9f06e7 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/internal/version.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/internal/version.go @@ -2,9 +2,6 @@ package internal import ( "fmt" - "sync" - - "github.com/cilium/ebpf/internal/unix" ) const ( @@ -69,39 +66,9 @@ func (v Version) Kernel() uint32 { // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid // overflowing into PATCHLEVEL. // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). - s := v[2] - if s > 255 { - s = 255 - } + s := min(v[2], 255) // Truncate members to uint8 to prevent them from spilling over into // each other when overflowing 8 bits. return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) } - -// KernelVersion returns the version of the currently running kernel. -var KernelVersion = sync.OnceValues(func() (Version, error) { - return detectKernelVersion() -}) - -// detectKernelVersion returns the version of the running kernel. -func detectKernelVersion() (Version, error) { - vc, err := vdsoVersion() - if err != nil { - return Version{}, err - } - return NewVersionFromCode(vc), nil -} - -// KernelRelease returns the release string of the running kernel. -// Its format depends on the Linux distribution and corresponds to directory -// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and -// 4.19.0-16-amd64. 
-func KernelRelease() (string, error) { - var uname unix.Utsname - if err := unix.Uname(&uname); err != nil { - return "", fmt.Errorf("uname failed: %w", err) - } - - return unix.ByteSliceToString(uname.Release[:]), nil -} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/anchor.go b/src/runtime/vendor/github.com/cilium/ebpf/link/anchor.go index 1a3b5f7681fc..10fbba079cf2 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/anchor.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/anchor.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/cgroup.go b/src/runtime/vendor/github.com/cilium/ebpf/link/cgroup.go index f17d34f03c03..1f1416aa2b45 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/cgroup.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/iter.go b/src/runtime/vendor/github.com/cilium/ebpf/link/iter.go index 0a39faef8838..40bb69c70861 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/iter.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/iter.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -42,7 +44,7 @@ func AttachIter(opts IterOptions) (*Iter, error) { attr := sys.LinkCreateIterAttr{ ProgFd: uint32(progFd), AttachType: sys.AttachType(ebpf.AttachTraceIter), - IterInfo: sys.NewPointer(unsafe.Pointer(&info)), + IterInfo: sys.UnsafePointer(unsafe.Pointer(&info)), IterInfoLen: uint32(unsafe.Sizeof(info)), } @@ -75,7 +77,7 @@ func (it *Iter) Open() (io.ReadCloser, error) { return nil, fmt.Errorf("can't create iterator: %w", err) } - return fd.File("bpf_iter"), nil + return fd.File("bpf_iter") } // union bpf_iter_link_info.map diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe.go b/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe.go index fe3f17c37174..0912e0a0822f 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -10,6 +12,7 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/tracefs" "github.com/cilium/ebpf/internal/unix" @@ -60,6 +63,9 @@ func (ko *KprobeOptions) cookie() uint64 { // platform's syscall prefix (e.g. __x64_) to support attaching to syscalls // in a portable fashion. // +// On kernels 6.11 and later, setting a kprobe on a nonexistent symbol using +// tracefs incorrectly returns [unix.EINVAL] instead of [os.ErrNotExist]. +// // The returned Link may implement [PerfEvent]. func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { k, err := kprobe(symbol, prog, opts, false) @@ -91,7 +97,7 @@ func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error // in a portable fashion. // // On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol -// incorrectly returns unix.EINVAL instead of os.ErrNotExist. 
+// incorrectly returns [unix.EINVAL] instead of [os.ErrNotExist]. // // The returned Link may implement [PerfEvent]. func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { @@ -169,7 +175,7 @@ func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (* // Use kprobe PMU if the kernel has it available. tp, err := pmuProbe(args) if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - if prefix := internal.PlatformPrefix(); prefix != "" { + if prefix := linux.PlatformPrefix(); prefix != "" { args.Symbol = prefix + symbol tp, err = pmuProbe(args) } @@ -177,7 +183,7 @@ func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (* if err == nil { return tp, nil } - if err != nil && !errors.Is(err, ErrNotSupported) { + if !errors.Is(err, ErrNotSupported) { return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err) } @@ -185,7 +191,7 @@ func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (* args.Symbol = symbol tp, err = tracefsProbe(args) if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - if prefix := internal.PlatformPrefix(); prefix != "" { + if prefix := linux.PlatformPrefix(); prefix != "" { args.Symbol = prefix + symbol tp, err = tracefsProbe(args) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe_multi.go index f7a8291f9453..3a2b06a24123 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe_multi.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/kprobe_multi.go @@ -1,10 +1,11 @@ +//go:build !windows + package link import ( "errors" "fmt" "os" - "unsafe" "github.com/cilium/ebpf" "github.com/cilium/ebpf/asm" @@ -37,6 +38,14 @@ type KprobeMultiOptions struct { // Each Cookie is assigned to the Symbol or Address specified at the // corresponding slice index. Cookies []uint64 + + // Session must be true when attaching Programs with the + // [ebpf.AttachTraceKprobeSession] attach type. + // + // This makes a Kprobe execute on both function entry and return. The entry + // program can share a cookie value with the return program and can decide + // whether the return program gets executed. + Session bool } // KprobeMulti attaches the given eBPF program to the entry point of a given set @@ -60,7 +69,7 @@ func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { // // Requires at least Linux 5.18. 
func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { - return kprobeMulti(prog, opts, unix.BPF_F_KPROBE_MULTI_RETURN) + return kprobeMulti(prog, opts, sys.BPF_F_KPROBE_MULTI_RETURN) } func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) { @@ -76,15 +85,20 @@ func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Lin return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput) } if syms != 0 && addrs != 0 { - return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput) + return nil, fmt.Errorf("fields Symbols and Addresses are mutually exclusive: %w", errInvalidInput) } if cookies > 0 && cookies != syms && cookies != addrs { - return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput) + return nil, fmt.Errorf("field Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput) + } + + attachType := sys.BPF_TRACE_KPROBE_MULTI + if opts.Session { + attachType = sys.BPF_TRACE_KPROBE_SESSION } attr := &sys.LinkCreateKprobeMultiAttr{ ProgFd: uint32(prog.FD()), - AttachType: sys.BPF_TRACE_KPROBE_MULTI, + AttachType: attachType, KprobeMultiFlags: flags, } @@ -95,29 +109,39 @@ func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Lin case addrs != 0: attr.Count = addrs - attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0])) + attr.Addrs = sys.SlicePointer(opts.Addresses) } if cookies != 0 { - attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + attr.Cookies = sys.SlicePointer(opts.Cookies) } fd, err := sys.LinkCreateKprobeMulti(attr) + if err == nil { + return &kprobeMultiLink{RawLink{fd, ""}}, nil + } + if errors.Is(err, unix.ESRCH) { return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist) } - if errors.Is(err, unix.EINVAL) { - return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err) - } - if err != nil { + if opts.Session { + if haveFeatErr := haveBPFLinkKprobeSession(); haveFeatErr != nil { + return nil, haveFeatErr + } + } else { if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil { return nil, haveFeatErr } - return nil, err } - return &kprobeMultiLink{RawLink{fd, ""}}, nil + // Check EINVAL after running feature probes, since it's also returned when + // the kernel doesn't support the multi/session attach types. 
+ if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not %s?)", err, ebpf.AttachType(attachType)) + } + + return nil, err } type kprobeMultiLink struct { @@ -126,7 +150,7 @@ type kprobeMultiLink struct { var _ Link = (*kprobeMultiLink)(nil) -func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error { +func (kml *kprobeMultiLink) Update(_ *ebpf.Program) error { return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported) } @@ -149,7 +173,7 @@ func (kml *kprobeMultiLink) Info() (*Info, error) { }, nil } -var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error { +var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ Name: "probe_kpm_link", Type: ebpf.Kprobe, @@ -188,4 +212,45 @@ var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5 fd.Close() return nil -}) +}, "5.18") + +var haveBPFLinkKprobeSession = internal.NewFeatureTest("bpf_link_kprobe_session", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kps_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeSession, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_SESSION, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + // If CONFIG_FPROBE isn't set. + case errors.Is(err, unix.EOPNOTSUPP): + return internal.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}, "6.10") diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/link.go b/src/runtime/vendor/github.com/cilium/ebpf/link/link.go index 9c34616c9a9a..425811e777e1 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/link.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/link.go @@ -11,6 +11,9 @@ import ( "github.com/cilium/ebpf/internal/sys" ) +// Type is the kind of link. +type Type = sys.LinkType + var ErrNotSupported = internal.ErrNotSupported // Link represents a Program attached to a BPF hook. @@ -78,7 +81,9 @@ func NewFromID(id ID) (Link, error) { return wrapRawLink(&RawLink{fd, ""}) } -// LoadPinnedLink loads a link that was persisted into a bpffs. +// LoadPinnedLink loads a Link from a pin (file) on the BPF virtual filesystem. +// +// Requires at least Linux 5.7. func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { raw, err := loadPinnedRawLink(fileName, opts) if err != nil { @@ -88,51 +93,6 @@ func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { return wrapRawLink(raw) } -// wrap a RawLink in a more specific type if possible. -// -// The function takes ownership of raw and closes it on error. 
-func wrapRawLink(raw *RawLink) (_ Link, err error) { - defer func() { - if err != nil { - raw.Close() - } - }() - - info, err := raw.Info() - if err != nil { - return nil, err - } - - switch info.Type { - case RawTracepointType: - return &rawTracepoint{*raw}, nil - case TracingType: - return &tracing{*raw}, nil - case CgroupType: - return &linkCgroup{*raw}, nil - case IterType: - return &Iter{*raw}, nil - case NetNsType: - return &NetNsLink{*raw}, nil - case KprobeMultiType: - return &kprobeMultiLink{*raw}, nil - case UprobeMultiType: - return &uprobeMultiLink{*raw}, nil - case PerfEventType: - return &perfEventLink{*raw, nil}, nil - case TCXType: - return &tcxLink{*raw}, nil - case NetfilterType: - return &netfilterLink{*raw}, nil - case NetkitType: - return &netkitLink{*raw}, nil - case XDPType: - return &xdpLink{*raw}, nil - default: - return raw, nil - } -} - // ID uniquely identifies a BPF link. type ID = sys.LinkID @@ -158,158 +118,6 @@ type Info struct { extra interface{} } -type TracingInfo struct { - AttachType sys.AttachType - TargetObjId uint32 - TargetBtfId sys.TypeID -} - -type CgroupInfo struct { - CgroupId uint64 - AttachType sys.AttachType - _ [4]byte -} - -type NetNsInfo struct { - NetnsIno uint32 - AttachType sys.AttachType -} - -type TCXInfo struct { - Ifindex uint32 - AttachType sys.AttachType -} - -type XDPInfo struct { - Ifindex uint32 -} - -type NetfilterInfo struct { - Pf uint32 - Hooknum uint32 - Priority int32 - Flags uint32 -} - -type NetkitInfo struct { - Ifindex uint32 - AttachType sys.AttachType -} - -type KprobeMultiInfo struct { - count uint32 - flags uint32 - missed uint64 -} - -// AddressCount is the number of addresses hooked by the kprobe. -func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) { - return kpm.count, kpm.count > 0 -} - -func (kpm *KprobeMultiInfo) Flags() (uint32, bool) { - return kpm.flags, kpm.count > 0 -} - -func (kpm *KprobeMultiInfo) Missed() (uint64, bool) { - return kpm.missed, kpm.count > 0 -} - -type PerfEventInfo struct { - Type sys.PerfEventType - extra interface{} -} - -func (r *PerfEventInfo) Kprobe() *KprobeInfo { - e, _ := r.extra.(*KprobeInfo) - return e -} - -type KprobeInfo struct { - address uint64 - missed uint64 -} - -func (kp *KprobeInfo) Address() (uint64, bool) { - return kp.address, kp.address > 0 -} - -func (kp *KprobeInfo) Missed() (uint64, bool) { - return kp.missed, kp.address > 0 -} - -// Tracing returns tracing type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) Tracing() *TracingInfo { - e, _ := r.extra.(*TracingInfo) - return e -} - -// Cgroup returns cgroup type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) Cgroup() *CgroupInfo { - e, _ := r.extra.(*CgroupInfo) - return e -} - -// NetNs returns netns type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) NetNs() *NetNsInfo { - e, _ := r.extra.(*NetNsInfo) - return e -} - -// XDP returns XDP type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) XDP() *XDPInfo { - e, _ := r.extra.(*XDPInfo) - return e -} - -// TCX returns TCX type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) TCX() *TCXInfo { - e, _ := r.extra.(*TCXInfo) - return e -} - -// Netfilter returns netfilter type-specific link info. -// -// Returns nil if the type-specific link info isn't available. 
-func (r Info) Netfilter() *NetfilterInfo { - e, _ := r.extra.(*NetfilterInfo) - return e -} - -// Netkit returns netkit type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) Netkit() *NetkitInfo { - e, _ := r.extra.(*NetkitInfo) - return e -} - -// KprobeMulti returns kprobe-multi type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) KprobeMulti() *KprobeMultiInfo { - e, _ := r.extra.(*KprobeMultiInfo) - return e -} - -// PerfEvent returns perf-event type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) PerfEvent() *PerfEventInfo { - e, _ := r.extra.(*PerfEventInfo) - return e -} - // RawLink is the low-level API to bpf_link. // // You should consider using the higher level interfaces in this @@ -319,38 +127,8 @@ type RawLink struct { pinnedPath string } -// AttachRawLink creates a raw link. -func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { - if err := haveBPFLink(); err != nil { - return nil, err - } - - if opts.Target < 0 { - return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) - } - - progFd := opts.Program.FD() - if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) - } - - attr := sys.LinkCreateAttr{ - TargetFd: uint32(opts.Target), - ProgFd: uint32(progFd), - AttachType: sys.AttachType(opts.Attach), - TargetBtfId: opts.BTF, - Flags: opts.Flags, - } - fd, err := sys.LinkCreate(&attr) - if err != nil { - return nil, fmt.Errorf("create link: %w", err) - } - - return &RawLink{fd, ""}, nil -} - func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { - fd, err := sys.ObjGet(&sys.ObjGetAttr{ + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ Pathname: sys.NewStringPointer(fileName), FileFlags: opts.Marshal(), }) @@ -358,6 +136,11 @@ func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, er return nil, fmt.Errorf("load pinned link: %w", err) } + if typ != sys.BPF_TYPE_LINK { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Link", fileName) + } + return &RawLink{fd, fileName}, nil } @@ -380,7 +163,7 @@ func (l *RawLink) Close() error { // Calling Close on a pinned Link will not break the link // until the pin is removed. func (l *RawLink) Pin(fileName string) error { - if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil { + if err := sys.Pin(l.pinnedPath, fileName, l.fd); err != nil { return err } l.pinnedPath = fileName @@ -389,7 +172,7 @@ func (l *RawLink) Pin(fileName string) error { // Unpin implements the Link interface. func (l *RawLink) Unpin() error { - if err := internal.Unpin(l.pinnedPath); err != nil { + if err := sys.Unpin(l.pinnedPath); err != nil { return err } l.pinnedPath = "" @@ -436,7 +219,10 @@ func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { OldProgFd: uint32(oldFd), Flags: opts.Flags, } - return sys.LinkUpdate(&attr) + if err := sys.LinkUpdate(&attr); err != nil { + return fmt.Errorf("update link: %w", err) + } + return nil } // Info returns metadata about the link. 
diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/link_other.go b/src/runtime/vendor/github.com/cilium/ebpf/link/link_other.go new file mode 100644 index 000000000000..cd9452fd8380 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/link_other.go @@ -0,0 +1,260 @@ +//go:build !windows + +package link + +import ( + "fmt" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +// Valid link types. +const ( + UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC + RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT + TracingType = sys.BPF_LINK_TYPE_TRACING + CgroupType = sys.BPF_LINK_TYPE_CGROUP + IterType = sys.BPF_LINK_TYPE_ITER + NetNsType = sys.BPF_LINK_TYPE_NETNS + XDPType = sys.BPF_LINK_TYPE_XDP + PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT + KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI + TCXType = sys.BPF_LINK_TYPE_TCX + UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI + NetfilterType = sys.BPF_LINK_TYPE_NETFILTER + NetkitType = sys.BPF_LINK_TYPE_NETKIT +) + +// AttachRawLink creates a raw link. +func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if err := haveBPFLink(); err != nil { + return nil, err + } + + if opts.Target < 0 { + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + p, attachType := platform.DecodeConstant(opts.Attach) + if p != platform.Linux { + return nil, fmt.Errorf("attach type %s: %w", opts.Attach, internal.ErrNotSupportedOnOS) + } + + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(attachType), + TargetBtfId: opts.BTF, + Flags: opts.Flags, + } + fd, err := sys.LinkCreate(&attr) + if err != nil { + return nil, fmt.Errorf("create link: %w", err) + } + + return &RawLink{fd, ""}, nil +} + +// wrap a RawLink in a more specific type if possible. +// +// The function takes ownership of raw and closes it on error. 
+func wrapRawLink(raw *RawLink) (_ Link, err error) { + defer func() { + if err != nil { + raw.Close() + } + }() + + info, err := raw.Info() + if err != nil { + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + case KprobeMultiType: + return &kprobeMultiLink{*raw}, nil + case UprobeMultiType: + return &uprobeMultiLink{*raw}, nil + case PerfEventType: + return &perfEventLink{*raw, nil}, nil + case TCXType: + return &tcxLink{*raw}, nil + case NetfilterType: + return &netfilterLink{*raw}, nil + case NetkitType: + return &netkitLink{*raw}, nil + case XDPType: + return &xdpLink{*raw}, nil + default: + return raw, nil + } +} + +type TracingInfo struct { + AttachType sys.AttachType + TargetObjId uint32 + TargetBtfId sys.TypeID +} + +type CgroupInfo struct { + CgroupId uint64 + AttachType sys.AttachType + _ [4]byte +} + +type NetNsInfo struct { + NetnsIno uint32 + AttachType sys.AttachType +} + +type TCXInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type XDPInfo struct { + Ifindex uint32 +} + +type NetfilterInfo struct { + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 +} + +type NetkitInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type KprobeMultiInfo struct { + count uint32 + flags uint32 + missed uint64 +} + +// AddressCount is the number of addresses hooked by the kprobe. +func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) { + return kpm.count, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Flags() (uint32, bool) { + return kpm.flags, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Missed() (uint64, bool) { + return kpm.missed, kpm.count > 0 +} + +type PerfEventInfo struct { + Type sys.PerfEventType + extra interface{} +} + +func (r *PerfEventInfo) Kprobe() *KprobeInfo { + e, _ := r.extra.(*KprobeInfo) + return e +} + +type KprobeInfo struct { + address uint64 + missed uint64 +} + +func (kp *KprobeInfo) Address() (uint64, bool) { + return kp.address, kp.address > 0 +} + +func (kp *KprobeInfo) Missed() (uint64, bool) { + return kp.missed, kp.address > 0 +} + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// XDP returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e +} + +// TCX returns TCX type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) TCX() *TCXInfo { + e, _ := r.extra.(*TCXInfo) + return e +} + +// Netfilter returns netfilter type-specific link info. +// +// Returns nil if the type-specific link info isn't available. 
+func (r Info) Netfilter() *NetfilterInfo { + e, _ := r.extra.(*NetfilterInfo) + return e +} + +// Netkit returns netkit type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netkit() *NetkitInfo { + e, _ := r.extra.(*NetkitInfo) + return e +} + +// KprobeMulti returns kprobe-multi type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) KprobeMulti() *KprobeMultiInfo { + e, _ := r.extra.(*KprobeMultiInfo) + return e +} + +// PerfEvent returns perf-event type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) PerfEvent() *PerfEventInfo { + e, _ := r.extra.(*PerfEventInfo) + return e +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/link_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/link/link_windows.go new file mode 100644 index 000000000000..d9c6f889053c --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/link_windows.go @@ -0,0 +1,48 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +// AttachRawLink creates a raw link. +func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if opts.Target != 0 || opts.BTF != 0 || opts.Flags != 0 { + return nil, fmt.Errorf("specified option(s) %w", internal.ErrNotSupportedOnOS) + } + + plat, attachType := platform.DecodeConstant(opts.Attach) + if plat != platform.Windows { + return nil, fmt.Errorf("attach type %s: %w", opts.Attach, internal.ErrNotSupportedOnOS) + } + + attachTypeGUID, err := efw.EbpfGetEbpfAttachType(attachType) + if err != nil { + return nil, fmt.Errorf("get attach type: %w", err) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + raw, err := efw.EbpfProgramAttachFds(progFd, attachTypeGUID, nil, 0) + if err != nil { + return nil, fmt.Errorf("attach link: %w", err) + } + + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + return &RawLink{fd: fd}, nil +} + +func wrapRawLink(raw *RawLink) (Link, error) { + return raw, nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/netfilter.go b/src/runtime/vendor/github.com/cilium/ebpf/link/netfilter.go index 34be39085976..90e914c51992 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/netfilter.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/netfilter.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -63,7 +65,7 @@ func AttachNetfilter(opts NetfilterOptions) (Link, error) { return &netfilterLink{RawLink{fd, ""}}, nil } -func (*netfilterLink) Update(new *ebpf.Program) error { +func (*netfilterLink) Update(_ *ebpf.Program) error { return fmt.Errorf("netfilter update: %w", ErrNotSupported) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/netkit.go b/src/runtime/vendor/github.com/cilium/ebpf/link/netkit.go index 5eee3b023ae6..5e6a321af157 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/netkit.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/netkit.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git 
a/src/runtime/vendor/github.com/cilium/ebpf/link/netns.go b/src/runtime/vendor/github.com/cilium/ebpf/link/netns.go index b1edd340a3fb..a9f7ee79c932 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/netns.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/netns.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/perf_event.go b/src/runtime/vendor/github.com/cilium/ebpf/link/perf_event.go index 1d8feb58c1c0..f1f5b84a9e30 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/perf_event.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -115,7 +117,7 @@ func (pl *perfEventLink) Close() error { return nil } -func (pl *perfEventLink) Update(prog *ebpf.Program) error { +func (pl *perfEventLink) Update(_ *ebpf.Program) error { return fmt.Errorf("perf event link update: %w", ErrNotSupported) } @@ -132,7 +134,7 @@ func (pl *perfEventLink) PerfEvent() (*os.File, error) { return nil, err } - return fd.File("perf-event"), nil + return fd.File("perf-event") } func (pl *perfEventLink) Info() (*Info, error) { @@ -185,7 +187,7 @@ func (pi *perfEventIoctl) isLink() {} // // Detaching a program from a perf event is currently not possible, so a // program replacement mechanism cannot be implemented for perf events. -func (pi *perfEventIoctl) Update(prog *ebpf.Program) error { +func (pi *perfEventIoctl) Update(_ *ebpf.Program) error { return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) } @@ -209,7 +211,7 @@ func (pi *perfEventIoctl) PerfEvent() (*os.File, error) { return nil, err } - return fd.File("perf-event"), nil + return fd.File("perf-event") } // attach the given eBPF prog to the perf event stored in pe. 
@@ -303,7 +305,7 @@ func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { // // https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 // https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e -var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error { +var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ Name: "probe_bpf_perf_link", Type: ebpf.Kprobe, @@ -329,4 +331,4 @@ var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15" return nil } return err -}) +}, "5.15") diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/program.go b/src/runtime/vendor/github.com/cilium/ebpf/link/program.go index d8a2a15f9379..dbd7a9727c8c 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/program.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/program.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/query.go b/src/runtime/vendor/github.com/cilium/ebpf/link/query.go index fe534f8efadb..eeca82811626 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/query.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/query.go @@ -1,8 +1,9 @@ +//go:build !windows + package link import ( "fmt" - "unsafe" "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal/sys" @@ -84,13 +85,13 @@ func QueryPrograms(opts QueryOptions) (*QueryResult, error) { AttachType: sys.AttachType(opts.Attach), QueryFlags: opts.QueryFlags, Count: count, - ProgIds: sys.NewPointer(unsafe.Pointer(&progIds[0])), + ProgIds: sys.SlicePointer(progIds), } var linkIds []ID if haveLinkIDs { linkIds = make([]ID, count) - attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0])) + attr.LinkIds = sys.SlicePointer(linkIds) } if err := sys.ProgQuery(&attr); err != nil { diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/src/runtime/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go index 925e621cbbc7..4be9c6a276df 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/socket_filter.go b/src/runtime/vendor/github.com/cilium/ebpf/link/socket_filter.go index 84f0b656f80a..8399f023157f 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/socket_filter.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/syscalls.go b/src/runtime/vendor/github.com/cilium/ebpf/link/syscalls.go index d09b5acb0f35..9948dead414a 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/syscalls.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -10,27 +12,7 @@ import ( "github.com/cilium/ebpf/internal/unix" ) -// Type is the kind of link. 
-type Type = sys.LinkType - -// Valid link types. -const ( - UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC - RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT - TracingType = sys.BPF_LINK_TYPE_TRACING - CgroupType = sys.BPF_LINK_TYPE_CGROUP - IterType = sys.BPF_LINK_TYPE_ITER - NetNsType = sys.BPF_LINK_TYPE_NETNS - XDPType = sys.BPF_LINK_TYPE_XDP - PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT - KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI - TCXType = sys.BPF_LINK_TYPE_TCX - UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI - NetfilterType = sys.BPF_LINK_TYPE_NETFILTER - NetkitType = sys.BPF_LINK_TYPE_NETKIT -) - -var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error { +var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ Type: ebpf.CGroupSKB, License: "MIT", @@ -48,9 +30,9 @@ var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() e // have the syscall. prog.Close() return nil -}) +}, "4.10") -var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", "5.5", func() error { +var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", func() error { if err := haveProgAttach(); err != nil { return err } @@ -90,9 +72,9 @@ var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic repl return nil } return err -}) +}, "5.5") -var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error { +var haveBPFLink = internal.NewFeatureTest("bpf_link", func() error { attr := sys.LinkCreateAttr{ // This is a hopefully invalid file descriptor, which triggers EBADF. TargetFd: ^uint32(0), @@ -107,9 +89,9 @@ var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error { return nil } return err -}) +}, "5.7") -var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error { +var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", func() error { attr := sys.ProgQueryAttr{ // We rely on this being checked during the syscall. 
// With an otherwise correct payload we expect EBADF here @@ -127,9 +109,9 @@ var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() err return ErrNotSupported } return errors.New("syscall succeeded unexpectedly") -}) +}, "4.15") -var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error { +var haveTCX = internal.NewFeatureTest("tcx", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ Type: ebpf.SchedCLS, License: "MIT", @@ -162,9 +144,9 @@ var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error { return ErrNotSupported } return errors.New("syscall succeeded unexpectedly") -}) +}, "6.6") -var haveNetkit = internal.NewFeatureTest("netkit", "6.7", func() error { +var haveNetkit = internal.NewFeatureTest("netkit", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ Type: ebpf.SchedCLS, License: "MIT", @@ -197,4 +179,4 @@ var haveNetkit = internal.NewFeatureTest("netkit", "6.7", func() error { return ErrNotSupported } return errors.New("syscall succeeded unexpectedly") -}) +}, "6.7") diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/tcx.go b/src/runtime/vendor/github.com/cilium/ebpf/link/tcx.go index ac045b71da0e..8661018ec174 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/tcx.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/tcx.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/tracepoint.go b/src/runtime/vendor/github.com/cilium/ebpf/link/tracepoint.go index 6fc78b982872..514961ebec9f 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/tracepoint.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/tracing.go b/src/runtime/vendor/github.com/cilium/ebpf/link/tracing.go index 9e570afc96a8..b33b3dc0eb1d 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/tracing.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/tracing.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -14,7 +16,7 @@ type tracing struct { RawLink } -func (f *tracing) Update(new *ebpf.Program) error { +func (f *tracing) Update(_ *ebpf.Program) error { return fmt.Errorf("tracing update: %w", ErrNotSupported) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe.go b/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe.go index 194d1d319a7b..d20997e9d8f1 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -16,7 +18,7 @@ var ( uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 uprobeRefCtrOffsetShift = 32 - haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error { + haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", func() error { _, err := os.Stat(uprobeRefCtrOffsetPMUPath) if errors.Is(err, os.ErrNotExist) { return internal.ErrNotSupported @@ -25,7 +27,7 @@ var ( return err } return nil - }) + }, "4.20") // ErrNoSymbol indicates that the given symbol 
was not found // in the ELF symbols table. @@ -321,7 +323,7 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti if err == nil { return tp, nil } - if err != nil && !errors.Is(err, ErrNotSupported) { + if !errors.Is(err, ErrNotSupported) { return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err) } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe_multi.go b/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe_multi.go index aea807b329ae..e34ad7168bf0 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe_multi.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/uprobe_multi.go @@ -1,10 +1,11 @@ +//go:build !windows + package link import ( "errors" "fmt" "os" - "unsafe" "github.com/cilium/ebpf" "github.com/cilium/ebpf/asm" @@ -47,7 +48,7 @@ func (ex *Executable) UretprobeMulti(symbols []string, prog *ebpf.Program, opts // The return probe is not limited for symbols entry, so there's no special // setup for return uprobes (other than the extra flag). The symbols, opts.Offsets // and opts.Addresses arrays follow the same logic as for entry uprobes. - return ex.uprobeMulti(symbols, prog, opts, unix.BPF_F_UPROBE_MULTI_RETURN) + return ex.uprobeMulti(symbols, prog, opts, sys.BPF_F_UPROBE_MULTI_RETURN) } func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions, flags uint32) (Link, error) { @@ -69,13 +70,13 @@ func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *Up refCtrOffsets := len(opts.RefCtrOffsets) if addrs == 0 { - return nil, fmt.Errorf("Addresses are required: %w", errInvalidInput) + return nil, fmt.Errorf("field Addresses is required: %w", errInvalidInput) } if refCtrOffsets > 0 && refCtrOffsets != addrs { - return nil, fmt.Errorf("RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput) + return nil, fmt.Errorf("field RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput) } if cookies > 0 && cookies != addrs { - return nil, fmt.Errorf("Cookies must be exactly Addresses in length: %w", errInvalidInput) + return nil, fmt.Errorf("field Cookies must be exactly Addresses in length: %w", errInvalidInput) } attr := &sys.LinkCreateUprobeMultiAttr{ @@ -84,23 +85,26 @@ func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *Up AttachType: sys.BPF_TRACE_UPROBE_MULTI, UprobeMultiFlags: flags, Count: uint32(addrs), - Offsets: sys.NewPointer(unsafe.Pointer(&addresses[0])), + Offsets: sys.SlicePointer(addresses), Pid: opts.PID, } if refCtrOffsets != 0 { - attr.RefCtrOffsets = sys.NewPointer(unsafe.Pointer(&opts.RefCtrOffsets[0])) + attr.RefCtrOffsets = sys.SlicePointer(opts.RefCtrOffsets) } if cookies != 0 { - attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + attr.Cookies = sys.SlicePointer(opts.Cookies) } fd, err := sys.LinkCreateUprobeMulti(attr) if errors.Is(err, unix.ESRCH) { return nil, fmt.Errorf("%w (specified pid not found?)", os.ErrNotExist) } + // Since Linux commit 46ba0e49b642 ("bpf: fix multi-uprobe PID filtering + // logic"), if the provided pid overflows MaxInt32 (turning it negative), the + // kernel will return EINVAL instead of ESRCH. 
if errors.Is(err, unix.EINVAL) { - return nil, fmt.Errorf("%w (missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err) + return nil, fmt.Errorf("%w (invalid pid, missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err) } if err != nil { @@ -168,11 +172,11 @@ type uprobeMultiLink struct { var _ Link = (*uprobeMultiLink)(nil) -func (kml *uprobeMultiLink) Update(prog *ebpf.Program) error { +func (kml *uprobeMultiLink) Update(_ *ebpf.Program) error { return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported) } -var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", "6.6", func() error { +var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ Name: "probe_upm_link", Type: ebpf.Kprobe, @@ -198,7 +202,7 @@ var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", "6 ProgFd: uint32(prog.FD()), AttachType: sys.BPF_TRACE_UPROBE_MULTI, Path: sys.NewStringPointer("/"), - Offsets: sys.NewPointer(unsafe.Pointer(&[]uint64{0})), + Offsets: sys.SlicePointer([]uint64{0}), Count: 1, }) switch { @@ -213,4 +217,4 @@ var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", "6 // should not happen fd.Close() return errors.New("successfully attached uprobe_multi to /, kernel bug?") -}) +}, "6.6") diff --git a/src/runtime/vendor/github.com/cilium/ebpf/link/xdp.go b/src/runtime/vendor/github.com/cilium/ebpf/link/xdp.go index 2ec441229a51..2daf0c4a226c 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/link/xdp.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/link/xdp.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( diff --git a/src/runtime/vendor/github.com/cilium/ebpf/linker.go b/src/runtime/vendor/github.com/cilium/ebpf/linker.go index 788f21b7b6fc..0b966477f367 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/linker.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/linker.go @@ -9,10 +9,13 @@ import ( "io/fs" "math" "slices" + "strings" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/platform" ) // handles stores handle objects to avoid gc cleanup @@ -121,7 +124,7 @@ func hasFunctionReferences(insns asm.Instructions) bool { // // Passing a nil target will relocate against the running kernel. insns are // modified in place. -func applyRelocations(insns asm.Instructions, targets []*btf.Spec, kmodName string, bo binary.ByteOrder, b *btf.Builder) error { +func applyRelocations(insns asm.Instructions, bo binary.ByteOrder, b *btf.Builder, c *btf.Cache) error { var relos []*btf.CORERelocation var reloInsns []*asm.Instruction iter := insns.Iterate() @@ -140,23 +143,28 @@ func applyRelocations(insns asm.Instructions, targets []*btf.Spec, kmodName stri bo = internal.NativeEndian } - if len(targets) == 0 { - kernelTarget, err := btf.LoadKernelSpec() + kernelTarget, err := c.Kernel() + if err != nil { + return fmt.Errorf("load kernel spec: %w", err) + } + + modules, err := c.Modules() + // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES + // or CONFIG_DEBUG_INFO_BTF disabled. 
+ if err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } + + targets := make([]*btf.Spec, 0, 1+len(modules)) + targets = append(targets, kernelTarget) + + for _, kmod := range modules { + spec, err := c.Module(kmod) if err != nil { - return fmt.Errorf("load kernel spec: %w", err) + return fmt.Errorf("load BTF for kmod %s: %w", kmod, err) } - targets = append(targets, kernelTarget) - if kmodName != "" { - kmodTarget, err := btf.LoadKernelModuleSpec(kmodName) - // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES disabled. - if err != nil && !errors.Is(err, fs.ErrNotExist) { - return fmt.Errorf("load kernel module spec: %w", err) - } - if err == nil { - targets = append(targets, kmodTarget) - } - } + targets = append(targets, spec) } fixups, err := btf.CORERelocate(relos, targets, bo, b.Add) @@ -205,13 +213,19 @@ func flattenPrograms(progs map[string]*ProgramSpec, names []string) { // dependencies of each program. func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { prog := progs[name] + progRefs := refs[prog] + + if len(progRefs) == 0 { + // No references, nothing to do. + return prog.Instructions + } insns := make(asm.Instructions, len(prog.Instructions)) copy(insns, prog.Instructions) // Add all direct references of prog to the list of to be linked programs. - pending := make([]string, len(refs[prog])) - copy(pending, refs[prog]) + pending := make([]string, len(progRefs)) + copy(pending, progRefs) // All references for which we've appended instructions. linked := make(map[string]bool) @@ -264,9 +278,11 @@ func fixupAndValidate(insns asm.Instructions) error { return nil } -// POISON_CALL_KFUNC_BASE in libbpf. -// https://github.com/libbpf/libbpf/blob/2778cbce609aa1e2747a69349f7f46a2f94f0522/src/libbpf.c#L5767 -const kfuncCallPoisonBase = 2002000000 +// A constant used to poison calls to non-existent kfuncs. +// +// Similar POISON_CALL_KFUNC_BASE in libbpf, except that we use a value lower +// than 2^28 to fit into a tagged constant. +const kfuncCallPoisonBase = 0xdedc0de // fixupKfuncs loops over all instructions in search for kfunc calls. // If at least one is found, the current kernels BTF and module BTFis are searched to set Instruction.Constant @@ -289,9 +305,15 @@ func fixupKfuncs(insns asm.Instructions) (_ handles, err error) { return nil, nil fixups: - // only load the kernel spec if we found at least one kfunc call + // Only load kernel BTF if we found at least one kfunc call. kernelSpec can be + // nil if the kernel does not have BTF, in which case we poison all kfunc + // calls. kernelSpec, err := btf.LoadKernelSpec() - if err != nil { + // ErrNotSupportedOnOS wraps ErrNotSupported, check for it first. + if errors.Is(err, internal.ErrNotSupportedOnOS) { + return nil, fmt.Errorf("kfuncs are not supported on this platform: %w", err) + } + if err != nil && !errors.Is(err, ErrNotSupported) { return nil, err } @@ -316,32 +338,36 @@ fixups: return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta") } + // findTargetInKernel returns btf.ErrNotFound if the input btf.Spec is nil. target := btf.Type((*btf.Func)(nil)) spec, module, err := findTargetInKernel(kernelSpec, kfm.Func.Name, &target) - if kfm.Binding == elf.STB_WEAK && errors.Is(err, btf.ErrNotFound) { - if ins.IsKfuncCall() { - // If the kfunc call is weak and not found, poison the call. Use a recognizable constant - // to make it easier to debug. 
And set src to zero so the verifier doesn't complain - // about the invalid imm/offset values before dead-code elimination. - ins.Constant = kfuncCallPoisonBase - ins.Src = 0 - } else if ins.OpCode.IsDWordLoad() { - // If the kfunc DWordLoad is weak and not found, set its address to 0. - ins.Constant = 0 - ins.Src = 0 - } else { - return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata") + if errors.Is(err, btf.ErrNotFound) { + if kfm.Binding == elf.STB_WEAK { + if ins.IsKfuncCall() { + // If the kfunc call is weak and not found, poison the call. Use a + // recognizable constant to make it easier to debug. + fn, err := asm.BuiltinFuncForPlatform(platform.Native, kfuncCallPoisonBase) + if err != nil { + return nil, err + } + *ins = fn.Call() + } else if ins.OpCode.IsDWordLoad() { + // If the kfunc DWordLoad is weak and not found, set its address to 0. + ins.Constant = 0 + ins.Src = 0 + } else { + return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata") + } + + iter.Next() + continue } - iter.Next() - continue - } - // Error on non-weak kfunc not found. - if errors.Is(err, btf.ErrNotFound) { + // Error on non-weak kfunc not found. return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported) } if err != nil { - return nil, err + return nil, fmt.Errorf("finding kfunc in kernel: %w", err) } idx, err := fdArray.add(module) @@ -457,3 +483,42 @@ func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) { return kconfig, nil } + +func resolveKsymReferences(insns asm.Instructions) error { + var missing []string + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta == nil { + continue + } + + addr, err := kallsyms.Address(meta.Name) + if err != nil { + return fmt.Errorf("resolve ksym %s: %w", meta.Name, err) + } + if addr != 0 { + ins.Constant = int64(addr) + continue + } + + if meta.Binding == elf.STB_WEAK { + // A weak ksym variable in eBPF C means its resolution is optional. + // Set a zero constant explicitly for clarity. + ins.Constant = 0 + continue + } + + if !slices.Contains(missing, meta.Name) { + missing = append(missing, meta.Name) + } + } + + if len(missing) > 0 { + return fmt.Errorf("kernel is missing symbol: %s", strings.Join(missing, ",")) + } + + return nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/map.go b/src/runtime/vendor/github.com/cilium/ebpf/map.go index 0b62101c3cb0..55974b4ebcf9 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/map.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/map.go @@ -17,6 +17,7 @@ import ( "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/sysenc" "github.com/cilium/ebpf/internal/unix" @@ -45,12 +46,13 @@ type MapOptions struct { } // MapID represents the unique ID of an eBPF map -type MapID uint32 +type MapID = sys.MapID // MapSpec defines a Map. type MapSpec struct { - // Name is passed to the kernel as a debug aid. Must only contain - // alpha numeric and '_' characters. + // Name is passed to the kernel as a debug aid. + // + // Unsupported characters will be stripped. 
Name string Type MapType KeySize uint32 @@ -66,16 +68,13 @@ type MapSpec struct { Pinning PinType // Specify numa node during map creation - // (effective only if unix.BPF_F_NUMA_NODE flag is set, + // (effective only if sys.BPF_F_NUMA_NODE flag is set, // which can be imported from golang.org/x/sys/unix) NumaNode uint32 // The initial contents of the map. May be nil. Contents []MapKV - // Whether to freeze a map after setting its initial contents. - Freeze bool - // InnerMap is used as a template for ArrayOfMaps and HashOfMaps InnerMap *MapSpec @@ -86,6 +85,11 @@ type MapSpec struct { // The key and value type of this map. May be nil. Key, Value btf.Type + + // Tags is a list of btf_decl_tag attributes set on the map definition. + // + // Decorate a map definition with `__attribute__((btf_decl_tag("foo")))`. + Tags []string } func (ms *MapSpec) String() string { @@ -104,6 +108,7 @@ func (ms *MapSpec) Copy() *MapSpec { cpy.Contents = slices.Clone(cpy.Contents) cpy.Key = btf.Copy(cpy.Key) cpy.Value = btf.Copy(cpy.Value) + cpy.Tags = slices.Clone(cpy.Tags) if cpy.InnerMap == ms { cpy.InnerMap = &cpy @@ -126,8 +131,8 @@ func (ms *MapSpec) Copy() *MapSpec { // The copy is only performed if fixups are necessary, so callers mustn't mutate // the returned spec. func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { - switch spec.Type { - case ArrayOfMaps, HashOfMaps: + switch { + case spec.Type.canStoreMap(): if spec.ValueSize != 0 && spec.ValueSize != 4 { return nil, errors.New("ValueSize must be zero or four for map of map") } @@ -135,7 +140,7 @@ func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { spec = spec.Copy() spec.ValueSize = 4 - case PerfEventArray: + case spec.Type == PerfEventArray: if spec.KeySize != 0 && spec.KeySize != 4 { return nil, errors.New("KeySize must be zero or four for perf event array") } @@ -161,6 +166,17 @@ func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { // behaviour in the past. spec.MaxEntries = n } + + case spec.Type == CPUMap: + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup cpu map: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // Perform clamping similar to PerfEventArray. + spec.MaxEntries = n + } } return spec, nil @@ -190,6 +206,14 @@ func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { return value, ds, nil } +func (ms *MapSpec) readOnly() bool { + return (ms.Flags & sys.BPF_F_RDONLY_PROG) > 0 +} + +func (ms *MapSpec) writeOnly() bool { + return (ms.Flags & sys.BPF_F_WRONLY_PROG) > 0 +} + // MapKV is used to initialize the contents of a Map. type MapKV struct { Key interface{} @@ -220,11 +244,16 @@ func (ms *MapSpec) Compatible(m *Map) error { diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries)) } - // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow this - // mismatch. - if !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) && - m.flags != ms.Flags { - diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, ms.Flags)) + flags := ms.Flags + if ms.Type == DevMap || ms.Type == DevMapHash { + // As of 0cdbb4b09a06 ("devmap: Allow map lookups from eBPF") + // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly + // allow this mismatch. 
+ flags |= (m.flags & sys.BPF_F_RDONLY_PROG) + } + + if m.flags != flags { + diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, flags)) } if len(diffs) == 0 { @@ -254,11 +283,15 @@ type Map struct { pinnedPath string // Per CPU maps return values larger than the size in the spec fullValueSize int + + memory *Memory } -// NewMapFromFD creates a map from a raw fd. +// NewMapFromFD creates a [Map] around a raw fd. // // You should not use fd after calling this function. +// +// Requires at least Linux 4.13. func NewMapFromFD(fd int) (*Map, error) { f, err := sys.NewFD(fd) if err != nil { @@ -269,13 +302,13 @@ func NewMapFromFD(fd int) (*Map, error) { } func newMapFromFD(fd *sys.FD) (*Map, error) { - info, err := newMapInfoFromFd(fd) + info, err := minimalMapInfoFromFd(fd) if err != nil { fd.Close() return nil, fmt.Errorf("get map info: %w", err) } - return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) + return newMapFromParts(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) } // NewMap creates a new Map. @@ -350,7 +383,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) { } var innerFd *sys.FD - if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { + if spec.Type.canStoreMap() { if spec.InnerMap == nil { return nil, fmt.Errorf("%s requires InnerMap", spec.Type) } @@ -359,7 +392,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) { return nil, errors.New("inner maps cannot be pinned") } - template, err := spec.InnerMap.createMap(nil, opts) + template, err := spec.InnerMap.createMap(nil) if err != nil { return nil, fmt.Errorf("inner map: %w", err) } @@ -371,7 +404,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) { innerFd = template.fd } - m, err := spec.createMap(innerFd, opts) + m, err := spec.createMap(innerFd) if err != nil { return nil, err } @@ -387,9 +420,86 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) { return m, nil } +// Memory returns a memory-mapped region for the Map. The Map must have been +// created with the BPF_F_MMAPABLE flag. Repeated calls to Memory return the +// same mapping. Callers are responsible for coordinating access to Memory. +func (m *Map) Memory() (*Memory, error) { + if m.memory != nil { + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + +// unsafeMemory returns a heap-mapped memory region for the Map. The Map must +// have been created with the BPF_F_MMAPABLE flag. Repeated calls to Memory +// return the same mapping. Callers are responsible for coordinating access to +// Memory. 
+func (m *Map) unsafeMemory() (*Memory, error) { + if m.memory != nil { + if !m.memory.heap { + return nil, errors.New("unsafeMemory would return existing non-heap memory") + } + + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newUnsafeMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + +func (m *Map) memorySize() (int, error) { + switch m.Type() { + case Array: + // In Arrays, values are always laid out on 8-byte boundaries regardless of + // architecture. Multiply by MaxEntries and align the result to the host's + // page size. + size := int(internal.Align(m.ValueSize(), 8) * m.MaxEntries()) + size = internal.Align(size, os.Getpagesize()) + return size, nil + case Arena: + // For Arenas, MaxEntries denotes the maximum number of pages available to + // the arena. + return int(m.MaxEntries()) * os.Getpagesize(), nil + } + + return 0, fmt.Errorf("determine memory size of map type %s: %w", m.Type(), ErrNotSupported) +} + // createMap validates the spec's properties and creates the map in the kernel // using the given opts. It does not populate or freeze the map. -func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err error) { +func (spec *MapSpec) createMap(inner *sys.FD) (_ *Map, err error) { closeOnError := func(closer io.Closer) { if err != nil { closer.Close() @@ -411,12 +521,18 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err erro return nil, err } + p, sysMapType := platform.DecodeConstant(spec.Type) + if p != platform.Native { + return nil, fmt.Errorf("map type %s (%s): %w", spec.Type, p, internal.ErrNotSupportedOnOS) + } + attr := sys.MapCreateAttr{ - MapType: sys.MapType(spec.Type), + MapName: maybeFillObjName(spec.Name), + MapType: sys.MapType(sysMapType), KeySize: spec.KeySize, ValueSize: spec.ValueSize, MaxEntries: spec.MaxEntries, - MapFlags: sys.MapFlags(spec.Flags), + MapFlags: spec.Flags, NumaNode: spec.NumaNode, } @@ -424,10 +540,6 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err erro attr.InnerMapFd = inner.Uint() } - if haveObjName() == nil { - attr.MapName = sys.NewObjName(spec.Name) - } - if spec.Key != nil || spec.Value != nil { handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value) if err != nil && !errors.Is(err, btf.ErrNotSupported) { @@ -457,7 +569,7 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err erro } defer closeOnError(fd) - m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) + m, err := newMapFromParts(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) if err != nil { return nil, fmt.Errorf("map create: %w", err) } @@ -465,41 +577,54 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err erro } func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error { + if platform.IsWindows { + if errors.Is(err, unix.EINVAL) && attr.MapFlags != 0 { + return fmt.Errorf("map create: flags: %w", internal.ErrNotSupportedOnOS) + } + + return err + } + if errors.Is(err, unix.EPERM) { return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) } - if errors.Is(err, unix.EINVAL) && 
spec.MaxEntries == 0 { - return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) - } - if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap { - return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap) - } - if errors.Is(err, unix.EINVAL) && spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { - return fmt.Errorf("map create: %w (noPrealloc flag may be incompatible with map type %s)", err, spec.Type) + if errors.Is(err, unix.EINVAL) { + if spec.MaxEntries == 0 { + return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } + if spec.Type == UnspecifiedMap { + return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap) + } + if spec.Flags&sys.BPF_F_NO_PREALLOC != 0 && !spec.Type.mustHaveNoPrealloc() { + return fmt.Errorf("map create: %w (BPF_F_NO_PREALLOC flag may be incompatible with map type %s)", err, spec.Type) + } + if spec.Flags&sys.BPF_F_NO_PREALLOC == 0 && spec.Type.mustHaveNoPrealloc() { + return fmt.Errorf("map create: %w (BPF_F_NO_PREALLOC flag may need to be set for map type %s)", err, spec.Type) + } } - switch spec.Type { - case ArrayOfMaps, HashOfMaps: + if spec.Type.canStoreMap() { if haveFeatErr := haveNestedMaps(); haveFeatErr != nil { return fmt.Errorf("map create: %w", haveFeatErr) } } - if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze { + + if spec.readOnly() || spec.writeOnly() { if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { return fmt.Errorf("map create: %w", haveFeatErr) } } - if spec.Flags&unix.BPF_F_MMAPABLE > 0 { + if spec.Flags&sys.BPF_F_MMAPABLE > 0 { if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil { return fmt.Errorf("map create: %w", haveFeatErr) } } - if spec.Flags&unix.BPF_F_INNER_MAP > 0 { + if spec.Flags&sys.BPF_F_INNER_MAP > 0 { if haveFeatErr := haveInnerMaps(); haveFeatErr != nil { return fmt.Errorf("map create: %w", haveFeatErr) } } - if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + if spec.Flags&sys.BPF_F_NO_PREALLOC > 0 { if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil { return fmt.Errorf("map create: %w", haveFeatErr) } @@ -517,9 +642,9 @@ func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) erro return fmt.Errorf("map create: %w", err) } -// newMap allocates and returns a new Map structure. +// newMapFromParts allocates and returns a new Map structure. // Sets the fullValueSize on per-CPU maps. -func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { +func newMapFromParts(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { m := &Map{ name, fd, @@ -530,6 +655,7 @@ func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries flags, "", int(valueSize), + nil, } if !typ.hasPerCPUValue() { @@ -577,14 +703,19 @@ func (m *Map) Flags() uint32 { return m.flags } -// Info returns metadata about the map. +// Info returns metadata about the map. This was first introduced in Linux 4.5, +// but newer kernels support more MapInfo fields with the introduction of more +// features. See [MapInfo] and its methods for more details. +// +// Returns an error wrapping [ErrNotSupported] if the kernel supports neither +// BPF_OBJ_GET_INFO_BY_FD nor reading map information from /proc/self/fdinfo. func (m *Map) Info() (*MapInfo, error) { return newMapInfoFromFd(m.fd) } // Handle returns a reference to the Map's type information in the kernel. 
// -// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// Returns [ErrNotSupported] if the kernel has no BTF support, or if there is no // BTF associated with the Map. func (m *Map) Handle() (*btf.Handle, error) { info, err := m.Info() @@ -604,7 +735,7 @@ func (m *Map) Handle() (*btf.Handle, error) { type MapLookupFlags uint64 // LookupLock look up the value of a spin-locked map. -const LookupLock MapLookupFlags = unix.BPF_F_LOCK +const LookupLock MapLookupFlags = sys.BPF_F_LOCK // Lookup retrieves a value from a Map. // @@ -670,7 +801,7 @@ func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLooku // Returns a nil value if a key doesn't exist. func (m *Map) LookupBytes(key interface{}) ([]byte, error) { valueBytes := make([]byte, m.fullValueSize) - valuePtr := sys.NewSlicePointer(valueBytes) + valuePtr := sys.UnsafeSlicePointer(valueBytes) err := m.lookup(key, valuePtr, 0) if errors.Is(err, ErrKeyNotExist) { @@ -686,7 +817,7 @@ func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error { return err } valueBytes := make([]byte, m.fullValueSize) - if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + if err := m.lookup(key, sys.UnsafeSlicePointer(valueBytes), flags); err != nil { return err } return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) @@ -720,7 +851,7 @@ func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) err return err } valueBytes := make([]byte, m.fullValueSize) - if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + if err := m.lookupAndDelete(key, sys.UnsafeSlicePointer(valueBytes), flags); err != nil { return err } return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) @@ -897,7 +1028,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error { // Returns nil if there are no more keys. func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { nextKey := make([]byte, m.keySize) - nextKeyPtr := sys.NewSlicePointer(nextKey) + nextKeyPtr := sys.UnsafeSlicePointer(nextKey) err := m.nextKey(key, nextKeyPtr) if errors.Is(err, ErrKeyNotExist) { @@ -929,7 +1060,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { if err = sys.MapGetNextKey(&attr); err != nil { // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the // first map element when a nil key pointer is specified. - if key == nil && errors.Is(err, unix.EFAULT) { + if platform.IsLinux && key == nil && errors.Is(err, unix.EFAULT) { var guessKey []byte guessKey, err = m.guessNonExistentKey() if err != nil { @@ -937,7 +1068,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { } // Retry the syscall with a valid non-existing key. 
- attr.Key = sys.NewSlicePointer(guessKey) + attr.Key = sys.UnsafeSlicePointer(guessKey) if err = sys.MapGetNextKey(&attr); err == nil { return nil } @@ -963,7 +1094,7 @@ func (m *Map) guessNonExistentKey() ([]byte, error) { if err != nil { return nil, err } - valuePtr := sys.NewSlicePointer(page) + valuePtr := sys.UnsafeSlicePointer(page) randKey := make([]byte, int(m.keySize)) @@ -1066,13 +1197,13 @@ func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOu valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) - n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) - if errors.Is(err, unix.ENOSPC) { + n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if errors.Is(sysErr, unix.ENOSPC) { // Hash tables return ENOSPC when the size of the batch is smaller than // any bucket. - return n, fmt.Errorf("%w (batch size too small?)", err) - } else if err != nil { - return n, err + return n, fmt.Errorf("%w (batch size too small?)", sysErr) + } else if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr } err = valueBuf.Unmarshal(valuesOut) @@ -1080,7 +1211,7 @@ func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOu return 0, err } - return n, nil + return n, sysErr } func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { @@ -1090,11 +1221,11 @@ func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, va } valueBuf := make([]byte, count*int(m.fullValueSize)) - valuePtr := sys.NewSlicePointer(valueBuf) + valuePtr := sys.UnsafeSlicePointer(valueBuf) n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts) if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { - return 0, err + return 0, sysErr } err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf) @@ -1106,17 +1237,14 @@ func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, va } func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { - cursorLen := int(m.keySize) - if cursorLen < 4 { - // * generic_map_lookup_batch requires that batch_out is key_size bytes. - // This is used by array and LPM maps. - // - // * __htab_map_lookup_and_delete_batch requires u32. This is used by the - // various hash maps. - // - // Use a minimum of 4 bytes to avoid having to distinguish between the two. - cursorLen = 4 - } + // * generic_map_lookup_batch requires that batch_out is key_size bytes. + // This is used by array and LPM maps. + // + // * __htab_map_lookup_and_delete_batch requires u32. This is used by the + // various hash maps. + // + // Use a minimum of 4 bytes to avoid having to distinguish between the two. 
+ cursorLen := max(int(m.keySize), 4) inBatch := cursor.opaque if inBatch == nil { @@ -1142,8 +1270,8 @@ func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, key Keys: keyBuf.Pointer(), Values: valuePtr, Count: uint32(count), - InBatch: sys.NewSlicePointer(inBatch), - OutBatch: sys.NewSlicePointer(cursor.opaque), + InBatch: sys.UnsafeSlicePointer(inBatch), + OutBatch: sys.UnsafeSlicePointer(cursor.opaque), } if opts != nil { @@ -1225,7 +1353,7 @@ func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, erro return 0, err } - return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts) + return m.batchUpdate(count, keys, sys.UnsafeSlicePointer(valueBuf), opts) } // BatchDelete batch deletes entries in the map by keys. @@ -1336,6 +1464,7 @@ func (m *Map) Clone() (*Map, error) { m.flags, "", m.fullValueSize, + nil, }, nil } @@ -1349,7 +1478,7 @@ func (m *Map) Clone() (*Map, error) { // This requires bpffs to be mounted above fileName. // See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd func (m *Map) Pin(fileName string) error { - if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil { + if err := sys.Pin(m.pinnedPath, fileName, m.fd); err != nil { return err } m.pinnedPath = fileName @@ -1362,7 +1491,7 @@ func (m *Map) Pin(fileName string) error { // // Unpinning an unpinned Map returns nil. func (m *Map) Unpin() error { - if err := internal.Unpin(m.pinnedPath); err != nil { + if err := sys.Unpin(m.pinnedPath); err != nil { return err } m.pinnedPath = "" @@ -1400,7 +1529,7 @@ func (m *Map) finalize(spec *MapSpec) error { } } - if spec.Freeze { + if isConstantDataSection(spec.Name) || isKconfigSection(spec.Name) { if err := m.Freeze(); err != nil { return fmt.Errorf("freezing map: %w", err) } @@ -1413,7 +1542,7 @@ func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { if data == nil { if m.keySize == 0 { // Queues have a key length of zero, so passing nil here is valid. - return sys.NewPointer(nil), nil + return sys.UnsafePointer(nil), nil } return sys.Pointer{}, errors.New("can't use nil as key of map") } @@ -1448,7 +1577,7 @@ func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { return sys.Pointer{}, err } - return sys.NewSlicePointer(buf), nil + return sys.UnsafeSlicePointer(buf), nil } func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error { @@ -1501,9 +1630,11 @@ func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error { return buf.Unmarshal(value) } -// LoadPinnedMap loads a Map from a BPF file. +// LoadPinnedMap opens a Map from a pin (file) on the BPF virtual filesystem. +// +// Requires at least Linux 4.5. func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { - fd, err := sys.ObjGet(&sys.ObjGetAttr{ + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ Pathname: sys.NewStringPointer(fileName), FileFlags: opts.Marshal(), }) @@ -1511,6 +1642,11 @@ func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { return nil, err } + if typ != sys.BPF_TYPE_MAP { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Map", fileName) + } + m, err := newMapFromFD(fd) if err == nil { m.pinnedPath = fileName @@ -1530,6 +1666,10 @@ func unmarshalMap(buf sysenc.Buffer) (*Map, error) { // marshalMap marshals the fd of a map into a buffer in host endianness. 
func marshalMap(m *Map, length int) ([]byte, error) { + if m == nil { + return nil, errors.New("can't marshal a nil Map") + } + if length != 4 { return nil, fmt.Errorf("can't marshal map to %d bytes", length) } @@ -1645,9 +1785,10 @@ func MapGetNextID(startID MapID) (MapID, error) { return MapID(attr.NextId), sys.MapGetNextId(attr) } -// NewMapFromID returns the map for a given id. +// NewMapFromID returns the [Map] for a given map id. Returns [ErrNotExist] if +// there is no eBPF map with the given id. // -// Returns ErrNotExist, if there is no eBPF map with the given id. +// Requires at least Linux 4.13. func NewMapFromID(id MapID) (*Map, error) { fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ Id: uint32(id), diff --git a/src/runtime/vendor/github.com/cilium/ebpf/marshalers.go b/src/runtime/vendor/github.com/cilium/ebpf/marshalers.go index 57a0a8e88af6..d4e719c601e2 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/marshalers.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/marshalers.go @@ -20,7 +20,7 @@ import ( // unsafe.Pointer. func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) { if ptr, ok := data.(unsafe.Pointer); ok { - return sys.NewPointer(ptr), nil + return sys.UnsafePointer(ptr), nil } buf, err := sysenc.Marshal(data, length) @@ -96,7 +96,7 @@ func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { return sys.Pointer{}, err } - return sys.NewSlicePointer(buf), nil + return sys.UnsafeSlicePointer(buf), nil } // marshalBatchPerCPUValue encodes a batch-sized slice of slices containing diff --git a/src/runtime/vendor/github.com/cilium/ebpf/memory.go b/src/runtime/vendor/github.com/cilium/ebpf/memory.go new file mode 100644 index 000000000000..3475fb07b88e --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/memory.go @@ -0,0 +1,150 @@ +package ebpf + +import ( + "errors" + "fmt" + "io" + "runtime" + + "github.com/cilium/ebpf/internal/unix" +) + +// Memory is the building block for accessing the memory of specific bpf map +// types (Array and Arena at the time of writing) without going through the bpf +// syscall interface. +// +// Given the fd of a bpf map created with the BPF_F_MMAPABLE flag, a shared +// 'file'-based memory-mapped region can be allocated in the process' address +// space, exposing the bpf map's memory by simply accessing a memory location. + +var ErrReadOnly = errors.New("resource is read-only") + +// Memory implements accessing a Map's memory without making any syscalls. +// Pay attention to the difference between Go and C struct alignment rules. Use +// [structs.HostLayout] on supported Go versions to help with alignment. +// +// Note on memory coherence: avoid using packed structs in memory shared between +// user space and eBPF C programs. This drops a struct's memory alignment to 1, +// forcing the compiler to use single-byte loads and stores for field accesses. +// This may lead to partially-written data to be observed from user space. +// +// On most architectures, the memmove implementation used by Go's copy() will +// access data in word-sized chunks. If paired with a matching access pattern on +// the eBPF C side (and if using default memory alignment), accessing shared +// memory without atomics or other synchronization primitives should be sound +// for individual values. For accesses beyond a single value, the usual +// concurrent programming rules apply. 
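// Editor's note: a minimal, illustrative consumer of the coherence rules
// described in the comment above; it is not part of the vendored patch.
// It assumes a *Memory obtained elsewhere (the accessor on Map is not
// shown in this hunk) and uses only the ReadAt method defined below.
// Assumed imports: encoding/binary and github.com/cilium/ebpf.
// binary.NativeEndian matches the host byte order the BPF program uses.
//
//	// readCounter performs one aligned 8-byte read, matching the
//	// word-sized access pattern the comment above calls sound for
//	// individual values.
//	func readCounter(mem *ebpf.Memory, off int64) (uint64, error) {
//		var buf [8]byte
//		if _, err := mem.ReadAt(buf[:], off); err != nil {
//			return 0, err
//		}
//		return binary.NativeEndian.Uint64(buf[:]), nil
//	}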
+type Memory struct { + b []byte + ro bool + heap bool +} + +func newMemory(fd, size int) (*Memory, error) { + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. + // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + b, err := unix.Mmap(fd, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. + var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + b, err = unix.Mmap(fd, 0, size, unix.PROT_READ, unix.MAP_SHARED) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{ + b, + ro, + false, + } + runtime.SetFinalizer(mm, (*Memory).close) + + return mm, nil +} + +func (mm *Memory) close() { + if err := unix.Munmap(mm.b); err != nil { + panic(fmt.Errorf("unmapping memory: %w", err)) + } + mm.b = nil +} + +// Size returns the size of the memory-mapped region in bytes. +func (mm *Memory) Size() int { + return len(mm.b) +} + +// ReadOnly returns true if the memory-mapped region is read-only. +func (mm *Memory) ReadOnly() bool { + return mm.ro +} + +// bounds returns true if an access at off of the given size is within bounds. +func (mm *Memory) bounds(off uint64, size uint64) bool { + if off+size < off { + return false + } + return off+size <= uint64(len(mm.b)) +} + +// ReadAt implements [io.ReaderAt]. Useful for creating a new [io.OffsetWriter]. +// +// See [Memory] for details around memory coherence. +func (mm *Memory) ReadAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + + if p == nil { + return 0, fmt.Errorf("input buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("read offset out of range") + } + + n := copy(p, mm.b[off:]) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} + +// WriteAt implements [io.WriterAt]. Useful for creating a new +// [io.SectionReader]. +// +// See [Memory] for details around memory coherence. 
+func (mm *Memory) WriteAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + if mm.ro { + return 0, fmt.Errorf("memory-mapped region not writable: %w", ErrReadOnly) + } + + if p == nil { + return 0, fmt.Errorf("output buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("write offset out of range") + } + + n := copy(mm.b[off:], p) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/memory_unsafe.go b/src/runtime/vendor/github.com/cilium/ebpf/memory_unsafe.go new file mode 100644 index 000000000000..cc254397f317 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/memory_unsafe.go @@ -0,0 +1,342 @@ +package ebpf + +import ( + "errors" + "fmt" + "os" + "reflect" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// This file contains an experimental, unsafe implementation of Memory that +// allows taking a Go pointer to a memory-mapped region. This currently does not +// have first-class support from the Go runtime, so it may break in future Go +// versions. The Go proposal for the runtime to track off-heap pointers is here: +// https://github.com/golang/go/issues/70224. +// +// In Go, the programmer should not have to worry about freeing memory. Since +// this API synthesizes Go variables around global variables declared in a BPF +// C program, we want to lean on the runtime for making sure accessing them is +// safe at all times. Unfortunately, Go (as of 1.24) does not have the ability +// of automatically managing memory that was not allocated by the runtime. +// +// This led to a solution that requests regular Go heap memory by allocating a +// slice (making the runtime track pointers into the slice's backing array) and +// memory-mapping the bpf map's memory over it. Then, before returning the +// Memory to the caller, a finalizer is set on the backing array, making sure +// the bpf map's memory is unmapped from the heap before releasing the backing +// array to the runtime for reallocation. +// +// This obviates the need to maintain a reference to the *Memory at all times, +// which is difficult for the caller to achieve if the variable access is done +// through another object (like a sync.Atomic) that can potentially be passed +// around the Go application. Accidentally losing the reference to the *Memory +// would result in hard-to-debug segfaults, which are always unexpected in Go. + +//go:linkname heapObjectsCanMove runtime.heapObjectsCanMove +func heapObjectsCanMove() bool + +// Set from a file behind the ebpf_unsafe_memory_experiment build tag to enable +// features that require mapping bpf map memory over the Go heap. +var unsafeMemory = false + +// ErrInvalidType is returned when the given type cannot be used as a Memory or +// Variable pointer. +var ErrInvalidType = errors.New("invalid type") + +func newUnsafeMemory(fd, size int) (*Memory, error) { + // Some architectures need the size to be page-aligned to work with MAP_FIXED. + if size%os.Getpagesize() != 0 { + return nil, fmt.Errorf("memory: must be a multiple of page size (requested %d bytes)", size) + } + + // Allocate a page-aligned span of memory on the Go heap. 
+ alloc, err := allocate(size) + if err != nil { + return nil, fmt.Errorf("allocating memory: %w", err) + } + + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. + // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + // + // Map the bpf map memory over a page-aligned allocation on the Go heap. + err = mapmap(fd, alloc, size, unix.PROT_READ|unix.PROT_WRITE) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. + var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + err = mapmap(fd, alloc, size, unix.PROT_READ) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{ + unsafe.Slice((*byte)(alloc), size), + ro, + true, + } + + return mm, nil +} + +// allocate returns a pointer to a page-aligned section of memory on the Go +// heap, managed by the runtime. +// +//go:nocheckptr +func allocate(size int) (unsafe.Pointer, error) { + // Memory-mapping over a piece of the Go heap is unsafe when the GC can + // randomly decide to move objects around, in which case the mapped region + // will not move along with it. + if heapObjectsCanMove() { + return nil, errors.New("this Go runtime has a moving garbage collector") + } + + if size == 0 { + return nil, errors.New("size must be greater than 0") + } + + // Request at least two pages of memory from the runtime to ensure we can + // align the requested allocation to a page boundary. This is needed for + // MAP_FIXED and makes sure we don't mmap over some other allocation on the Go + // heap. + size = internal.Align(size+os.Getpagesize(), os.Getpagesize()) + + // Allocate a new slice and store a pointer to its backing array. + alloc := unsafe.Pointer(unsafe.SliceData(make([]byte, size))) + + // nolint:govet + // + // Align the pointer to a page boundary within the allocation. This may alias + // the initial pointer if it was already page-aligned. Ignore govet warnings + // since we're calling [runtime.KeepAlive] on the original Go memory. + aligned := unsafe.Pointer(internal.Align(uintptr(alloc), uintptr(os.Getpagesize()))) + runtime.KeepAlive(alloc) + + // Return an aligned pointer into the backing array, losing the original + // reference. The runtime.SetFinalizer docs specify that its argument 'must be + // a pointer to an object, complit or local var', but this is still somewhat + // vague and not enforced by the current implementation. + // + // Currently, finalizers can be set and triggered from any address within a + // heap allocation, even individual struct fields or arbitrary offsets within + // a slice. In this case, finalizers set on struct fields or slice offsets + // will only run when the whole struct or backing array are collected. The + // accepted runtime.AddCleanup proposal makes this behaviour more explicit and + // is set to deprecate runtime.SetFinalizer. + // + // Alternatively, we'd have to track the original allocation and the aligned + // pointer separately, which severely complicates finalizer setup and makes it + // prone to human error. 
For now, just bump the pointer and treat it as the + // new and only reference to the backing array. + return aligned, nil +} + +// mapmap memory-maps the given file descriptor at the given address and sets a +// finalizer on addr to unmap it when it's no longer reachable. +func mapmap(fd int, addr unsafe.Pointer, size, flags int) error { + // Map the bpf map memory over the Go heap. This will result in the following + // mmap layout in the process' address space (0xc000000000 is a span of Go + // heap), visualized using pmap: + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 1824 864 864 rw--- [ anon ] + // 000000c0001c8000 4 4 4 rw-s- [ anon ] + // 000000c0001c9000 2268 16 16 rw--- [ anon ] + // + // This will break up the Go heap, but as long as the runtime doesn't try to + // move our allocation around, this is safe for as long as we hold a reference + // to our allocated object. + // + // Use MAP_SHARED to make sure the kernel sees any writes we do, and MAP_FIXED + // to ensure the mapping starts exactly at the address we requested. If alloc + // isn't page-aligned, the mapping operation will fail. + if _, err := unix.MmapPtr(fd, 0, addr, uintptr(size), + flags, unix.MAP_SHARED|unix.MAP_FIXED); err != nil { + return fmt.Errorf("setting up memory-mapped region: %w", err) + } + + // Set a finalizer on the heap allocation to undo the mapping before the span + // is collected and reused by the runtime. This has a few reasons: + // + // - Avoid leaking memory/mappings. + // - Future writes to this memory should never clobber a bpf map's contents. + // - Some bpf maps are mapped read-only, causing a segfault if the runtime + // reallocates and zeroes the span later. + runtime.SetFinalizer((*byte)(addr), unmap(size)) + + return nil +} + +// unmap returns a function that takes a pointer to a memory-mapped region on +// the Go heap. The function undoes any mappings and discards the span's +// contents. +// +// Used as a finalizer in [newMemory], split off into a separate function for +// testing and to avoid accidentally closing over the unsafe.Pointer to the +// memory region, which would cause a cyclical reference. +// +// The resulting function panics if the mmap operation returns an error, since +// it would mean the integrity of the Go heap is compromised. +func unmap(size int) func(*byte) { + return func(a *byte) { + // Create another mapping at the same address to undo the original mapping. + // This will cause the kernel to repair the slab since we're using the same + // protection mode and flags as the original mapping for the Go heap. + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 4096 884 884 rw--- [ anon ] + // + // Using munmap here would leave an unmapped hole in the heap, compromising + // its integrity. + // + // MmapPtr allocates another unsafe.Pointer at the same address. Even though + // we discard it here, it may temporarily resurrect the backing array and + // delay its collection to the next GC cycle. + _, err := unix.MmapPtr(-1, 0, unsafe.Pointer(a), uintptr(size), + unix.PROT_READ|unix.PROT_WRITE, + unix.MAP_PRIVATE|unix.MAP_FIXED|unix.MAP_ANON) + if err != nil { + panic(fmt.Errorf("undoing bpf map memory mapping: %w", err)) + } + } +} + +// checkUnsafeMemory ensures value T can be accessed in mm at offset off. +// +// The comparable constraint narrows down the set of eligible types to exclude +// slices, maps and functions. These complex types cannot be mapped to memory +// directly. 
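// Editor's note: an illustrative sketch, not part of the vendored patch,
// of types that pass or fail the rules enforced by checkUnsafeMemory and
// checkType below. The type and field names are invented for the example;
// only the constraints (fixed-size fields, an embedded structs.HostLayout,
// no Go pointers, strings, slices or maps) come from the code.
//
//	// Accepted: fixed-size scalars and arrays, HostLayout embedded.
//	type sharedStats struct {
//		structs.HostLayout
//		Packets uint64
//		Drops   uint64
//		PerCPU  [8]uint32
//	}
//
//	// Rejected by checkType: strings and Go pointers have no fixed host
//	// representation, and the struct does not embed structs.HostLayout,
//	// so memoryPointer[badStats] would return ErrInvalidType.
//	type badStats struct {
//		Name string
//		Next *sharedStats
//	}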
+func checkUnsafeMemory[T comparable](mm *Memory, off uint64) error { + if mm.b == nil { + return fmt.Errorf("memory-mapped region is nil") + } + if mm.ro { + return ErrReadOnly + } + if !mm.heap { + return fmt.Errorf("memory region is not heap-mapped, build with '-tags ebpf_unsafe_memory_experiment' to enable: %w", ErrNotSupported) + } + + t := reflect.TypeFor[T]() + if err := checkType(t.String(), t); err != nil { + return err + } + + size := t.Size() + if size == 0 { + return fmt.Errorf("zero-sized type %s: %w", t, ErrInvalidType) + } + + if off%uint64(t.Align()) != 0 { + return fmt.Errorf("unaligned access of memory-mapped region: %d-byte aligned read at offset %d", t.Align(), off) + } + + vs, bs := uint64(size), uint64(len(mm.b)) + if off+vs > bs { + return fmt.Errorf("%d-byte value at offset %d exceeds mmap size of %d bytes", vs, off, bs) + } + + return nil +} + +// checkType recursively checks if the given type is supported for memory +// mapping. Only fixed-size, non-Go-pointer types are supported: bools, floats, +// (u)int[8-64], arrays, and structs containing them. As an exception, uintptr +// is allowed since the backing memory is expected to contain 32-bit pointers on +// 32-bit systems despite BPF always allocating 64 bits for pointers in a data +// section. +// +// Doesn't check for loops since it rejects pointers. Should that ever change, a +// visited set would be needed. +func checkType(name string, t reflect.Type) error { + // Special-case atomic types to allow them to be used as root types as well as + // struct fields. Notably, omit atomic.Value and atomic.Pointer since those + // are pointer types. Also, atomic.Value embeds an interface value, which + // doesn't make sense to share with C land. + if t.PkgPath() == "sync/atomic" { + switch t.Name() { + case "Bool", "Int32", "Int64", "Uint32", "Uint64", "Uintptr": + return nil + } + } + + switch t.Kind() { + case reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64, + reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return nil + + case reflect.Array: + at := t.Elem() + if err := checkType(fmt.Sprintf("%s.%s", name, at.String()), at); err != nil { + return err + } + + case reflect.Struct: + var hasHostLayout bool + for i := range t.NumField() { + at := t.Field(i).Type + + // Require [structs.HostLayout] to be embedded in all structs. Check the + // full package path to reject a user-defined HostLayout type. + if at.PkgPath() == "structs" && at.Name() == "HostLayout" { + hasHostLayout = true + continue + } + + if err := checkType(fmt.Sprintf("%s.%s", name, at.String()), at); err != nil { + return err + } + } + + if !hasHostLayout { + return fmt.Errorf("struct %s must embed structs.HostLayout: %w", name, ErrInvalidType) + } + + default: + // For basic types like int and bool, the kind name is the same as the type + // name, so the fallthrough case would print 'int type int not supported'. + // Omit the kind name if it matches the type name. + if t.String() == t.Kind().String() { + // Output: type int not supported + return fmt.Errorf("type %s not supported: %w", name, ErrInvalidType) + } + + // Output: interface value io.Reader not supported + return fmt.Errorf("%s type %s not supported: %w", t.Kind(), name, ErrInvalidType) + } + + return nil +} + +// memoryPointer returns a pointer to a value of type T at offset off in mm. +// Taking a pointer to a read-only Memory or to a Memory that is not heap-mapped +// is not supported. 
+// +// T must contain only fixed-size, non-Go-pointer types: bools, floats, +// (u)int[8-64], arrays, and structs containing them. Structs must embed +// [structs.HostLayout]. [ErrInvalidType] is returned if T is not a valid type. +// +// Memory must be writable, off must be aligned to the size of T, and the value +// must be within bounds of the Memory. +// +// To access read-only memory, use [Memory.ReadAt]. +func memoryPointer[T comparable](mm *Memory, off uint64) (*T, error) { + if err := checkUnsafeMemory[T](mm, off); err != nil { + return nil, fmt.Errorf("memory pointer: %w", err) + } + return (*T)(unsafe.Pointer(&mm.b[off])), nil +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go b/src/runtime/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go new file mode 100644 index 000000000000..e662065edfda --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go @@ -0,0 +1,7 @@ +//go:build ebpf_unsafe_memory_experiment + +package ebpf + +func init() { + unsafeMemory = true +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/netlify.toml b/src/runtime/vendor/github.com/cilium/ebpf/netlify.toml index 67c83f3b307f..764c3b447b5f 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/netlify.toml +++ b/src/runtime/vendor/github.com/cilium/ebpf/netlify.toml @@ -2,3 +2,4 @@ base = "docs/" publish = "site/" command = "mkdocs build" + environment = { PYTHON_VERSION = "3.13" } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/prog.go b/src/runtime/vendor/github.com/cilium/ebpf/prog.go index 9bc6325f8879..560c045adeea 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/prog.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/prog.go @@ -8,14 +8,14 @@ import ( "math" "path/filepath" "runtime" - "strings" + "sort" "time" - "unsafe" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/sysenc" "github.com/cilium/ebpf/internal/unix" @@ -37,7 +37,7 @@ var errBadRelocation = errors.New("bad CO-RE relocation") var errUnknownKfunc = errors.New("unknown kfunc") // ProgramID represents the unique ID of an eBPF program. -type ProgramID uint32 +type ProgramID = sys.ProgramID const ( // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. @@ -46,14 +46,21 @@ const ( outputPad = 256 + 2 ) -// Deprecated: the correct log size is now detected automatically and this -// constant is unused. -const DefaultVerifierLogSize = 64 * 1024 - // minVerifierLogSize is the default number of bytes allocated for the // verifier log. const minVerifierLogSize = 64 * 1024 +// maxVerifierLogSize is the maximum size of verifier log buffer the kernel +// will accept before returning EINVAL. May be increased to MaxUint32 in the +// future, but avoid the unnecessary EINVAL for now. +const maxVerifierLogSize = math.MaxUint32 >> 2 + +// maxVerifierAttempts is the maximum number of times the verifier will retry +// loading a program with a growing log buffer before giving up. 
Since we double +// the log size on every attempt, this is the absolute maximum number of +// attempts before the buffer reaches [maxVerifierLogSize]. +const maxVerifierAttempts = 30 + // ProgramOptions control loading a program into the kernel. type ProgramOptions struct { // Bitmap controlling the detail emitted by the kernel's eBPF verifier log. @@ -73,9 +80,10 @@ type ProgramOptions struct { // attempt at loading the program. LogLevel LogLevel - // Deprecated: the correct log buffer size is determined automatically - // and this field is ignored. - LogSize int + // Starting size of the verifier log buffer. If the verifier log is larger + // than this size, the buffer will be grown to fit the entire log. Leave at + // its default value unless troubleshooting. + LogSizeStart uint32 // Disables the verifier log completely, regardless of other options. LogDisabled bool @@ -98,8 +106,9 @@ type ProgramOptions struct { // ProgramSpec defines a Program. type ProgramSpec struct { - // Name is passed to the kernel as a debug aid. Must only contain - // alpha numeric and '_' characters. + // Name is passed to the kernel as a debug aid. + // + // Unsupported characters will be stripped. Name string // Type determines at which hook in the kernel a program will run. @@ -162,26 +171,24 @@ func (ps *ProgramSpec) Tag() (string, error) { return ps.Instructions.Tag(internal.NativeEndian) } -// KernelModule returns the kernel module, if any, the AttachTo function is contained in. -func (ps *ProgramSpec) KernelModule() (string, error) { +// targetsKernelModule returns true if the program supports being attached to a +// symbol provided by a kernel module. +func (ps *ProgramSpec) targetsKernelModule() bool { if ps.AttachTo == "" { - return "", nil + return false } switch ps.Type { - default: - return "", nil case Tracing: switch ps.AttachType { - default: - return "", nil - case AttachTraceFEntry: - case AttachTraceFExit: + case AttachTraceFEntry, AttachTraceFExit: + return true } - fallthrough case Kprobe: - return kallsyms.KernelModule(ps.AttachTo) + return true } + + return false } // VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a @@ -226,7 +233,7 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er return nil, errors.New("can't load a program from a nil spec") } - prog, err := newProgramWithOptions(spec, opts) + prog, err := newProgramWithOptions(spec, opts, newBTFCache(&opts)) if errors.Is(err, asm.ErrUnsatisfiedMapReference) { return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) } @@ -242,7 +249,7 @@ var ( kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase)) ) -func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { +func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, c *btf.Cache) (*Program, error) { if len(spec.Instructions) == 0 { return nil, errors.New("instructions cannot be empty") } @@ -261,45 +268,32 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er // Overwrite Kprobe program version if set to zero or the magic version constant. 
kv := spec.KernelVersion if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { - v, err := internal.KernelVersion() + v, err := linux.KernelVersion() if err != nil { return nil, fmt.Errorf("detecting kernel version: %w", err) } kv = v.Kernel() } + p, progType := platform.DecodeConstant(spec.Type) + if p != platform.Native { + return nil, fmt.Errorf("program type %s (%s): %w", spec.Type, p, internal.ErrNotSupportedOnOS) + } + attr := &sys.ProgLoadAttr{ - ProgType: sys.ProgType(spec.Type), + ProgName: maybeFillObjName(spec.Name), + ProgType: sys.ProgType(progType), ProgFlags: spec.Flags, ExpectedAttachType: sys.AttachType(spec.AttachType), License: sys.NewStringPointer(spec.License), KernVersion: kv, } - if haveObjName() == nil { - attr.ProgName = sys.NewObjName(spec.Name) - } - insns := make(asm.Instructions, len(spec.Instructions)) copy(insns, spec.Instructions) - kmodName, err := spec.KernelModule() - if err != nil { - return nil, fmt.Errorf("kernel module search: %w", err) - } - - var targets []*btf.Spec - if opts.KernelTypes != nil { - targets = append(targets, opts.KernelTypes) - } - if kmodName != "" && opts.KernelModuleTypes != nil { - if modBTF, ok := opts.KernelModuleTypes[kmodName]; ok { - targets = append(targets, modBTF) - } - } - var b btf.Builder - if err := applyRelocations(insns, targets, kmodName, spec.ByteOrder, &b); err != nil { + if err := applyRelocations(insns, spec.ByteOrder, &b, c); err != nil { return nil, fmt.Errorf("apply CO-RE relocations: %w", err) } @@ -321,11 +315,11 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er attr.FuncInfoRecSize = btf.FuncInfoSize attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize - attr.FuncInfo = sys.NewSlicePointer(fib) + attr.FuncInfo = sys.SlicePointer(fib) attr.LineInfoRecSize = btf.LineInfoSize attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize - attr.LineInfo = sys.NewSlicePointer(lib) + attr.LineInfo = sys.SlicePointer(lib) } if !b.Empty() { @@ -344,6 +338,10 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er } defer kconfig.Close() + if err := resolveKsymReferences(insns); err != nil { + return nil, fmt.Errorf("resolve .ksyms: %w", err) + } + if err := fixupAndValidate(insns); err != nil { return nil, err } @@ -356,7 +354,7 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er if len(handles) > 0 { fdArray := handles.fdArray() - attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0])) + attr.FdArray = sys.SlicePointer(fdArray) } buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) @@ -366,7 +364,7 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er } bytecode := buf.Bytes() - attr.Insns = sys.NewSlicePointer(bytecode) + attr.Insns = sys.SlicePointer(bytecode) attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) if spec.AttachTarget != nil { @@ -393,59 +391,48 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er } } - // The caller requested a specific verifier log level. Set up the log buffer - // so that there is a chance of loading the program in a single shot. 
- var logBuf []byte - if !opts.LogDisabled && opts.LogLevel != 0 { - logBuf = make([]byte, minVerifierLogSize) - attr.LogLevel = opts.LogLevel - attr.LogSize = uint32(len(logBuf)) - attr.LogBuf = sys.NewSlicePointer(logBuf) + if platform.IsWindows && opts.LogLevel != 0 { + return nil, fmt.Errorf("log level: %w", internal.ErrNotSupportedOnOS) } - for { - var fd *sys.FD + var logBuf []byte + var fd *sys.FD + if opts.LogDisabled { + // Loading with logging disabled should never retry. fd, err = sys.ProgLoad(attr) if err == nil { - return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil - } - - if opts.LogDisabled { - break + return &Program{"", fd, spec.Name, "", spec.Type}, nil } - - if attr.LogTrueSize != 0 && attr.LogSize >= attr.LogTrueSize { - // The log buffer already has the correct size. - break + } else { + // Only specify log size if log level is also specified. Setting size + // without level results in EINVAL. Level will be bumped to LogLevelBranch + // if the first load fails. + if opts.LogLevel != 0 { + attr.LogLevel = opts.LogLevel + attr.LogSize = internal.Between(opts.LogSizeStart, minVerifierLogSize, maxVerifierLogSize) } - if attr.LogSize != 0 && !errors.Is(err, unix.ENOSPC) { - // Logging is enabled and the error is not ENOSPC, so we can infer - // that the log buffer is large enough. - break - } + attempts := 1 + for { + if attr.LogLevel != 0 { + logBuf = make([]byte, attr.LogSize) + attr.LogBuf = sys.SlicePointer(logBuf) + } - if attr.LogLevel == 0 { - // Logging is not enabled but loading the program failed. Enable - // basic logging. - attr.LogLevel = LogLevelBranch - } + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil + } - // Make an educated guess how large the buffer should be. Start - // at minVerifierLogSize and then double the size. - logSize := uint32(max(len(logBuf)*2, minVerifierLogSize)) - if int(logSize) < len(logBuf) { - return nil, errors.New("overflow while probing log buffer size") - } + if !retryLogAttrs(attr, opts.LogSizeStart, err) { + break + } - if attr.LogTrueSize != 0 { - // The kernel has given us a hint how large the log buffer has to be. - logSize = attr.LogTrueSize + if attempts >= maxVerifierAttempts { + return nil, fmt.Errorf("load program: %w (bug: hit %d verifier attempts)", err, maxVerifierAttempts) + } + attempts++ } - - logBuf = make([]byte, logSize) - attr.LogSize = logSize - attr.LogBuf = sys.NewSlicePointer(logBuf) } end := bytes.IndexByte(logBuf, 0) @@ -462,6 +449,12 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) } + case errors.Is(err, unix.EFAULT): + // EFAULT is returned when the kernel hits a verifier bug, and always + // overrides ENOSPC, defeating the buffer growth strategy. Warn the user + // that they may need to increase the buffer size manually. + return nil, fmt.Errorf("load program: %w (hit verifier bug, increase LogSizeStart to fit the log and check dmesg)", err) + case errors.Is(err, unix.EINVAL): if bytes.Contains(tail, coreBadCall) { err = errBadRelocation @@ -489,11 +482,55 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er return nil, internal.ErrorWithLog("load program", err, logBuf) } -// NewProgramFromFD creates a program from a raw fd. 
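// Editor's note: a simplified model, not part of the vendored patch, of
// the buffer-growth strategy implemented by retryLogAttrs below, included
// to make the arithmetic concrete. On every ENOSPC the size doubles from
// minVerifierLogSize (64 KiB) until it is clamped to maxVerifierLogSize
// (math.MaxUint32 >> 2, just under 1 GiB), which takes roughly a dozen
// retries and stays well inside the 30 permitted verifier attempts.
//
//	size := uint32(64 * 1024)                 // minVerifierLogSize
//	const limit uint32 = math.MaxUint32 >> 2  // maxVerifierLogSize
//	for size < limit {
//		if size >= limit/2 {
//			size = limit // final clamp, as in retryLogAttrs
//			break
//		}
//		size *= 2 // 64 KiB, 128 KiB, 256 KiB, ...
//	}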
+func retryLogAttrs(attr *sys.ProgLoadAttr, startSize uint32, err error) bool { + if attr.LogSize == maxVerifierLogSize { + // Maximum buffer size reached, don't grow or retry. + return false + } + + // ENOSPC means the log was enabled on the previous iteration, so we only + // need to grow the buffer. + if errors.Is(err, unix.ENOSPC) { + if attr.LogTrueSize != 0 { + // Kernel supports LogTrueSize and previous iteration undershot the buffer + // size. Try again with the given true size. + attr.LogSize = attr.LogTrueSize + return true + } + + // Ensure the size doesn't overflow. + const factor = 2 + if attr.LogSize >= maxVerifierLogSize/factor { + attr.LogSize = maxVerifierLogSize + return true + } + + // Make an educated guess how large the buffer should be by multiplying. Due + // to int division, this rounds down odd sizes. + attr.LogSize = internal.Between(attr.LogSize, minVerifierLogSize, maxVerifierLogSize/factor) + attr.LogSize *= factor + + return true + } + + if attr.LogLevel == 0 { + // Loading the program failed, it wasn't a buffer-related error, and the log + // was disabled the previous iteration. Enable basic logging and retry. + attr.LogLevel = LogLevelBranch + attr.LogSize = internal.Between(startSize, minVerifierLogSize, maxVerifierLogSize) + return true + } + + // Loading the program failed for a reason other than buffer size and the log + // was already enabled the previous iteration. Don't retry. + return false +} + +// NewProgramFromFD creates a [Program] around a raw fd. // // You should not use fd after calling this function. // -// Requires at least Linux 4.10. +// Requires at least Linux 4.13. Returns an error on Windows. func NewProgramFromFD(fd int) (*Program, error) { f, err := sys.NewFD(fd) if err != nil { @@ -503,9 +540,10 @@ func NewProgramFromFD(fd int) (*Program, error) { return newProgramFromFD(f) } -// NewProgramFromID returns the program for a given id. +// NewProgramFromID returns the [Program] for a given program id. Returns +// [ErrNotExist] if there is no eBPF program with the given id. // -// Returns ErrNotExist, if there is no eBPF program with the given id. +// Requires at least Linux 4.13. func NewProgramFromID(id ProgramID) (*Program, error) { fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ Id: uint32(id), @@ -518,7 +556,7 @@ func NewProgramFromID(id ProgramID) (*Program, error) { } func newProgramFromFD(fd *sys.FD) (*Program, error) { - info, err := newProgramInfoFromFd(fd) + info, err := minimalProgramInfoFromFd(fd) if err != nil { fd.Close() return nil, fmt.Errorf("discover program type: %w", err) @@ -546,6 +584,14 @@ func (p *Program) Info() (*ProgramInfo, error) { return newProgramInfoFromFd(p.fd) } +// Stats returns runtime statistics about the Program. Requires BPF statistics +// collection to be enabled, see [EnableStats]. +// +// Requires at least Linux 5.8. +func (p *Program) Stats() (*ProgramStats, error) { + return newProgramStatsFromFd(p.fd) +} + // Handle returns a reference to the program's type information in the kernel. // // Returns ErrNotSupported if the kernel has no BTF support, or if there is no @@ -598,7 +644,7 @@ func (p *Program) Clone() (*Program, error) { // This requires bpffs to be mounted above fileName. 
// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd func (p *Program) Pin(fileName string) error { - if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil { + if err := sys.Pin(p.pinnedPath, fileName, p.fd); err != nil { return err } p.pinnedPath = fileName @@ -611,7 +657,7 @@ func (p *Program) Pin(fileName string) error { // // Unpinning an unpinned Program returns nil. func (p *Program) Unpin() error { - if err := internal.Unpin(p.pinnedPath); err != nil { + if err := sys.Unpin(p.pinnedPath); err != nil { return err } p.pinnedPath = "" @@ -699,6 +745,10 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) { // // Note: the same restrictions from Test apply. func (p *Program) Run(opts *RunOptions) (uint32, error) { + if opts == nil { + opts = &RunOptions{} + } + ret, _, err := p.run(opts) if err != nil { return ret, fmt.Errorf("run program: %w", err) @@ -732,7 +782,11 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D return ret, total, nil } -var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error { +var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", func() error { + if platform.IsWindows { + return nil + } + prog, err := NewProgram(&ProgramSpec{ // SocketFilter does not require privileges on newer kernels. Type: SocketFilter, @@ -752,7 +806,7 @@ var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error { attr := sys.ProgRunAttr{ ProgFd: uint32(prog.FD()), DataSizeIn: uint32(len(in)), - DataIn: sys.NewSlicePointer(in), + DataIn: sys.SlicePointer(in), } err = sys.ProgRun(&attr) @@ -774,7 +828,7 @@ var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error { } return err -}) +}, "4.12", "windows:0.20") func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { if uint(len(opts.Data)) > math.MaxUint32 { @@ -785,13 +839,13 @@ func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { return 0, 0, err } - var ctxBytes []byte + var ctxIn []byte if opts.Context != nil { - ctx := new(bytes.Buffer) - if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil { + var err error + ctxIn, err = binary.Append(nil, internal.NativeEndian, opts.Context) + if err != nil { return 0, 0, fmt.Errorf("cannot serialize context: %v", err) } - ctxBytes = ctx.Bytes() } var ctxOut []byte @@ -803,13 +857,13 @@ func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { ProgFd: p.fd.Uint(), DataSizeIn: uint32(len(opts.Data)), DataSizeOut: uint32(len(opts.DataOut)), - DataIn: sys.NewSlicePointer(opts.Data), - DataOut: sys.NewSlicePointer(opts.DataOut), + DataIn: sys.SlicePointer(opts.Data), + DataOut: sys.SlicePointer(opts.DataOut), Repeat: uint32(opts.Repeat), - CtxSizeIn: uint32(len(ctxBytes)), + CtxSizeIn: uint32(len(ctxIn)), CtxSizeOut: uint32(len(ctxOut)), - CtxIn: sys.NewSlicePointer(ctxBytes), - CtxOut: sys.NewSlicePointer(ctxOut), + CtxIn: sys.SlicePointer(ctxIn), + CtxOut: sys.SlicePointer(ctxOut), Flags: opts.Flags, Cpu: opts.CPU, } @@ -883,6 +937,10 @@ func unmarshalProgram(buf sysenc.Buffer) (*Program, error) { } func marshalProgram(p *Program, length int) ([]byte, error) { + if p == nil { + return nil, errors.New("can't marshal a nil Program") + } + if length != 4 { return nil, fmt.Errorf("can't marshal program to %d bytes", length) } @@ -892,11 +950,12 @@ func marshalProgram(p *Program, length int) ([]byte, error) { return buf, nil } -// LoadPinnedProgram loads a Program from 
a BPF file. +// LoadPinnedProgram loads a Program from a pin (file) on the BPF virtual +// filesystem. // // Requires at least Linux 4.11. func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { - fd, err := sys.ObjGet(&sys.ObjGetAttr{ + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ Pathname: sys.NewStringPointer(fileName), FileFlags: opts.Marshal(), }) @@ -904,6 +963,11 @@ func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) return nil, err } + if typ != sys.BPF_TYPE_PROG { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Program", fileName) + } + info, err := newProgramInfoFromFd(fd) if err != nil { _ = fd.Close() @@ -920,22 +984,6 @@ func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) return &Program{"", fd, progName, fileName, info.Type}, nil } -// SanitizeName replaces all invalid characters in name with replacement. -// Passing a negative value for replacement will delete characters instead -// of replacing them. Use this to automatically generate valid names for maps -// and programs at runtime. -// -// The set of allowed characters depends on the running kernel version. -// Dots are only allowed as of kernel 5.2. -func SanitizeName(name string, replacement rune) string { - return strings.Map(func(char rune) rune { - if invalidBPFObjNameChar(char) { - return replacement - } - return char - }, name) -} - // ProgramGetNextID returns the ID of the next eBPF program. // // Returns ErrNotExist, if there is no next eBPF program. @@ -1045,6 +1093,10 @@ func findProgramTargetInKernel(name string, progType ProgramType, attachType Att // Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound // if the type wasn't found at all. func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) { + if kernelSpec == nil { + return nil, nil, fmt.Errorf("nil kernelSpec: %w", btf.ErrNotFound) + } + err := kernelSpec.TypeByName(typeName, target) if errors.Is(err, btf.ErrNotFound) { spec, module, err := findTargetInModule(kernelSpec, typeName, target) @@ -1139,3 +1191,19 @@ func findTargetInProgram(prog *Program, name string, progType ProgramType, attac return spec.TypeID(targetFunc) } + +func newBTFCache(opts *ProgramOptions) *btf.Cache { + c := btf.NewCache() + if opts.KernelTypes != nil { + c.KernelTypes = opts.KernelTypes + c.ModuleTypes = opts.KernelModuleTypes + if opts.KernelModuleTypes != nil { + c.LoadedModules = []string{} + for name := range opts.KernelModuleTypes { + c.LoadedModules = append(c.LoadedModules, name) + } + sort.Strings(c.LoadedModules) + } + } + return c +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/syscalls.go b/src/runtime/vendor/github.com/cilium/ebpf/syscalls.go index 4aef7faebc8a..f0f42b77d36f 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/syscalls.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/syscalls.go @@ -7,9 +7,12 @@ import ( "math" "os" "runtime" + "strings" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/tracefs" "github.com/cilium/ebpf/internal/unix" @@ -24,25 +27,41 @@ var ( sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP) ) -// 
invalidBPFObjNameChar returns true if char may not appear in -// a BPF object name. -func invalidBPFObjNameChar(char rune) bool { - dotAllowed := objNameAllowsDot() == nil - - switch { - case char >= 'A' && char <= 'Z': - return false - case char >= 'a' && char <= 'z': - return false - case char >= '0' && char <= '9': - return false - case dotAllowed && char == '.': - return false - case char == '_': - return false - default: - return true +// sanitizeName replaces all invalid characters in name with replacement. +// Passing a negative value for replacement will delete characters instead +// of replacing them. +// +// The set of allowed characters may change over time. +func sanitizeName(name string, replacement rune) string { + return strings.Map(func(char rune) rune { + switch { + case char >= 'A' && char <= 'Z': + return char + case char >= 'a' && char <= 'z': + return char + case char >= '0' && char <= '9': + return char + case char == '.': + return char + case char == '_': + return char + default: + return replacement + } + }, name) +} + +func maybeFillObjName(name string) sys.ObjName { + if errors.Is(haveObjName(), ErrNotSupported) { + return sys.ObjName{} + } + + name = sanitizeName(name, -1) + if errors.Is(objNameAllowsDot(), ErrNotSupported) { + name = strings.ReplaceAll(name, ".", "") } + + return sys.NewObjName(name) } func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { @@ -55,12 +74,17 @@ func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, return sys.ProgLoad(&sys.ProgLoadAttr{ ProgType: sys.ProgType(typ), License: sys.NewStringPointer(license), - Insns: sys.NewSlicePointer(bytecode), + Insns: sys.SlicePointer(bytecode), InsnCnt: uint32(len(bytecode) / asm.InstructionSize), }) } -var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error { +var haveNestedMaps = internal.NewFeatureTest("nested maps", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + _, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(ArrayOfMaps), KeySize: 4, @@ -76,9 +100,9 @@ var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error return nil } return err -}) +}, "4.12", "windows:0.21.0") -var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", "5.2", func() error { +var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", func() error { // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. m, err := sys.MapCreate(&sys.MapCreateAttr{ @@ -86,39 +110,39 @@ var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only m KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_RDONLY_PROG, + MapFlags: sys.BPF_F_RDONLY_PROG, }) if err != nil { return internal.ErrNotSupported } _ = m.Close() return nil -}) +}, "5.2") -var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", "5.5", func() error { +var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", func() error { // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. 
m, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_MMAPABLE, + MapFlags: sys.BPF_F_MMAPABLE, }) if err != nil { return internal.ErrNotSupported } _ = m.Close() return nil -}) +}, "5.5") -var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error { +var haveInnerMaps = internal.NewFeatureTest("inner maps", func() error { // This checks BPF_F_INNER_MAP, which appeared in 5.10. m, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_INNER_MAP, + MapFlags: sys.BPF_F_INNER_MAP, }) if err != nil { @@ -126,16 +150,16 @@ var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error { } _ = m.Close() return nil -}) +}, "5.10") -var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() error { +var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", func() error { // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. m, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(Hash), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_NO_PREALLOC, + MapFlags: sys.BPF_F_NO_PREALLOC, }) if err != nil { @@ -143,7 +167,7 @@ var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() } _ = m.Close() return nil -}) +}, "4.6") func wrapMapError(err error) error { if err == nil { @@ -169,7 +193,12 @@ func wrapMapError(err error) error { return err } -var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error { +var haveObjName = internal.NewFeatureTest("object names", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + attr := sys.MapCreateAttr{ MapType: sys.MapType(Array), KeySize: 4, @@ -185,9 +214,14 @@ var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error { _ = fd.Close() return nil -}) +}, "4.15", "windows:0.21.0") + +var objNameAllowsDot = internal.NewFeatureTest("dot in object names", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. 
+ return nil + } -var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", func() error { if err := haveObjName(); err != nil { return err } @@ -207,9 +241,9 @@ var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", fun _ = fd.Close() return nil -}) +}, "5.2", "windows:0.21.0") -var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error { +var haveBatchAPI = internal.NewFeatureTest("map batch api", func() error { var maxEntries uint32 = 2 attr := sys.MapCreateAttr{ MapType: sys.MapType(Hash), @@ -239,9 +273,9 @@ var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error return internal.ErrNotSupported } return nil -}) +}, "5.6") -var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5", func() error { +var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", func() error { insns := asm.Instructions{ asm.Mov.Reg(asm.R1, asm.R10), asm.Add.Imm(asm.R1, -8), @@ -257,9 +291,9 @@ var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5" } _ = fd.Close() return nil -}) +}, "5.5") -var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() error { +var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", func() error { insns := asm.Instructions{ asm.Call.Label("prog2").WithSymbol("prog1"), asm.Return(), @@ -273,10 +307,10 @@ var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() } _ = fd.Close() return nil -}) +}, "4.16") -var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func() error { - prefix := internal.PlatformPrefix() +var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", func() error { + prefix := linux.PlatformPrefix() if prefix == "" { return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH) } @@ -302,9 +336,9 @@ var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func } return evt.Close() -}) +}, "4.17") -var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", func() error { +var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", func() error { insns := asm.Instructions{ asm.Mov.Imm(asm.R0, 0), asm.Return(), @@ -319,7 +353,7 @@ var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", fu _, err := sys.ProgLoad(&sys.ProgLoadAttr{ ProgType: sys.ProgType(SocketFilter), License: sys.NewStringPointer("MIT"), - Insns: sys.NewSlicePointer(bytecode), + Insns: sys.SlicePointer(bytecode), InsnCnt: uint32(len(bytecode) / asm.InstructionSize), FuncInfoCnt: 1, ProgBtfFd: math.MaxUint32, @@ -334,4 +368,4 @@ var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", fu } return err -}) +}, "5.0") diff --git a/src/runtime/vendor/github.com/cilium/ebpf/types.go b/src/runtime/vendor/github.com/cilium/ebpf/types.go index 542c2397cab4..56e31820834c 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/types.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/types.go @@ -1,8 +1,8 @@ package ebpf import ( + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" - "github.com/cilium/ebpf/internal/unix" ) //go:generate go run golang.org/x/tools/cmd/stringer@latest -output types_string.go -type=MapType,ProgramType,PinType @@ -13,7 +13,7 @@ type MapType uint32 // All the various map types that can be created const ( - 
UnspecifiedMap MapType = iota + UnspecifiedMap MapType = MapType(platform.LinuxTag | iota) // Hash is a hash map Hash // Array is an array map @@ -95,11 +95,50 @@ const ( InodeStorage // TaskStorage - Specialized local storage map for task_struct. TaskStorage + // BloomFilter - Space-efficient data structure to quickly test whether an element exists in a set. + BloomFilter + // UserRingbuf - The reverse of RingBuf, used to send messages from user space to BPF programs. + UserRingbuf + // CgroupStorage - Store data keyed on a cgroup. If the cgroup disappears, the key is automatically removed. + CgroupStorage + // Arena - Sparse shared memory region between a BPF program and user space. + Arena ) +// Map types (Windows). +const ( + WindowsHash MapType = MapType(platform.WindowsTag | iota + 1) + WindowsArray + WindowsProgramArray + WindowsPerCPUHash + WindowsPerCPUArray + WindowsHashOfMaps + WindowsArrayOfMaps + WindowsLRUHash + WindowsLPMTrie + WindowsQueue + WindowsLRUCPUHash + WindowsStack + WindowsRingBuf +) + +// MapTypeForPlatform returns a platform specific map type. +// +// Use this if the library doesn't provide a constant yet. +func MapTypeForPlatform(plat string, typ uint32) (MapType, error) { + return platform.EncodeConstant[MapType](plat, typ) +} + // hasPerCPUValue returns true if the Map stores a value per CPU. func (mt MapType) hasPerCPUValue() bool { - return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage + switch mt { + case PerCPUHash, PerCPUArray, LRUCPUHash, PerCPUCGroupStorage: + return true + case WindowsPerCPUHash, WindowsPerCPUArray, WindowsLRUCPUHash: + return true + default: + return false + } } // canStoreMapOrProgram returns true if the Map stores references to another Map @@ -111,19 +150,48 @@ func (mt MapType) canStoreMapOrProgram() bool { // canStoreMap returns true if the map type accepts a map fd // for update and returns a map id for lookup. func (mt MapType) canStoreMap() bool { - return mt == ArrayOfMaps || mt == HashOfMaps + return mt == ArrayOfMaps || mt == HashOfMaps || mt == WindowsArrayOfMaps || mt == WindowsHashOfMaps } // canStoreProgram returns true if the map type accepts a program fd // for update and returns a program id for lookup. func (mt MapType) canStoreProgram() bool { - return mt == ProgramArray + return mt == ProgramArray || mt == WindowsProgramArray +} + +// canHaveValueSize returns true if the map type supports setting a value size. +func (mt MapType) canHaveValueSize() bool { + switch mt { + case RingBuf, Arena: + return false + + // Special-case perf events since they require a value size of either 0 or 4 + // for historical reasons. Let the library fix this up later. + case PerfEventArray: + return false + } + + return true +} + +// mustHaveNoPrealloc returns true if the map type does not support +// preallocation and needs the BPF_F_NO_PREALLOC flag set to be created +// successfully. +func (mt MapType) mustHaveNoPrealloc() bool { + switch mt { + case CgroupStorage, InodeStorage, TaskStorage, SkStorage: + return true + case LPMTrie: + return true + } + + return false } // ProgramType of the eBPF program type ProgramType uint32 -// eBPF program types +// eBPF program types (Linux). const ( UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC) SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER) @@ -160,6 +228,25 @@ const ( Netfilter = ProgramType(sys.BPF_PROG_TYPE_NETFILTER) ) +// eBPF program types (Windows). 
+// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_structs.h#L170 +const ( + WindowsXDP ProgramType = ProgramType(platform.WindowsTag) | (iota + 1) + WindowsBind + WindowsCGroupSockAddr + WindowsSockOps + WindowsXDPTest ProgramType = ProgramType(platform.WindowsTag) | 998 + WindowsSample ProgramType = ProgramType(platform.WindowsTag) | 999 +) + +// ProgramTypeForPlatform returns a platform specific program type. +// +// Use this if the library doesn't provide a constant yet. +func ProgramTypeForPlatform(plat string, value uint32) (ProgramType, error) { + return platform.EncodeConstant[ProgramType](plat, value) +} + // AttachType of the eBPF program, needed to differentiate allowed context accesses in // some newer program types like CGroupSockAddr. Should be set to AttachNone if not required. // Will cause invalid argument (EINVAL) at program load time if set incorrectly. @@ -170,6 +257,7 @@ type AttachType uint32 // AttachNone is an alias for AttachCGroupInetIngress for readability reasons. const AttachNone AttachType = 0 +// Attach types (Linux). const ( AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS) AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS) @@ -214,6 +302,7 @@ const ( AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT) AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI) + AttachTraceKprobeSession = AttachType(sys.BPF_TRACE_KPROBE_SESSION) AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP) AttachStructOps = AttachType(sys.BPF_STRUCT_OPS) AttachNetfilter = AttachType(sys.BPF_NETFILTER) @@ -229,6 +318,28 @@ const ( AttachNetkitPeer = AttachType(sys.BPF_NETKIT_PEER) ) +// Attach types (Windows). +// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_structs.h#L260 +const ( + AttachWindowsXDP = AttachType(platform.WindowsTag | iota + 1) + AttachWindowsBind + AttachWindowsCGroupInet4Connect + AttachWindowsCGroupInet6Connect + AttachWindowsCgroupInet4RecvAccept + AttachWindowsCgroupInet6RecvAccept + AttachWindowsCGroupSockOps + AttachWindowsSample + AttachWindowsXDPTest +) + +// AttachTypeForPlatform returns a platform specific attach type. +// +// Use this if the library doesn't provide a constant yet. 
+func AttachTypeForPlatform(plat string, value uint32) (AttachType, error) { + return platform.EncodeConstant[AttachType](plat, value) +} + // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command type AttachFlags uint32 @@ -263,10 +374,10 @@ func (lpo *LoadPinOptions) Marshal() uint32 { flags := lpo.Flags if lpo.ReadOnly { - flags |= unix.BPF_F_RDONLY + flags |= sys.BPF_F_RDONLY } if lpo.WriteOnly { - flags |= unix.BPF_F_WRONLY + flags |= sys.BPF_F_WRONLY } return flags } diff --git a/src/runtime/vendor/github.com/cilium/ebpf/types_string.go b/src/runtime/vendor/github.com/cilium/ebpf/types_string.go index ee60b5be5b64..efcd6a7dd0c0 100644 --- a/src/runtime/vendor/github.com/cilium/ebpf/types_string.go +++ b/src/runtime/vendor/github.com/cilium/ebpf/types_string.go @@ -38,17 +38,45 @@ func _() { _ = x[RingBuf-27] _ = x[InodeStorage-28] _ = x[TaskStorage-29] + _ = x[BloomFilter-30] + _ = x[UserRingbuf-31] + _ = x[CgroupStorage-32] + _ = x[Arena-33] + _ = x[WindowsHash-268435457] + _ = x[WindowsArray-268435458] + _ = x[WindowsProgramArray-268435459] + _ = x[WindowsPerCPUHash-268435460] + _ = x[WindowsPerCPUArray-268435461] + _ = x[WindowsHashOfMaps-268435462] + _ = x[WindowsArrayOfMaps-268435463] + _ = x[WindowsLRUHash-268435464] + _ = x[WindowsLPMTrie-268435465] + _ = x[WindowsQueue-268435466] + _ = x[WindowsLRUCPUHash-268435467] + _ = x[WindowsStack-268435468] + _ = x[WindowsRingBuf-268435469] } -const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorage" +const ( + _MapType_name_0 = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorageBloomFilterUserRingbufCgroupStorageArena" + _MapType_name_1 = "WindowsHashWindowsArrayWindowsProgramArrayWindowsPerCPUHashWindowsPerCPUArrayWindowsHashOfMapsWindowsArrayOfMapsWindowsLRUHashWindowsLPMTrieWindowsQueueWindowsLRUCPUHashWindowsStackWindowsRingBuf" +) -var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290} +var ( + _MapType_index_0 = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 301, 312, 325, 330} + _MapType_index_1 = [...]uint8{0, 11, 23, 42, 59, 77, 94, 112, 126, 140, 152, 169, 181, 195} +) func (i MapType) String() string { - if i >= MapType(len(_MapType_index)-1) { + switch { + case i <= 33: + return _MapType_name_0[_MapType_index_0[i]:_MapType_index_0[i+1]] + case 268435457 <= i && i <= 268435469: + i -= 268435457 + return _MapType_name_1[_MapType_index_1[i]:_MapType_index_1[i+1]] + default: return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _MapType_name[_MapType_index[i]:_MapType_index[i+1]] } func _() { // An "invalid array index" compiler error signifies that the constant values have changed. 
@@ -87,17 +115,39 @@ func _() { _ = x[SkLookup-30] _ = x[Syscall-31] _ = x[Netfilter-32] + _ = x[WindowsXDP-268435457] + _ = x[WindowsBind-268435458] + _ = x[WindowsCGroupSockAddr-268435459] + _ = x[WindowsSockOps-268435460] + _ = x[WindowsXDPTest-268436454] + _ = x[WindowsSample-268436455] } -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" +const ( + _ProgramType_name_0 = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" + _ProgramType_name_1 = "WindowsXDPWindowsBindWindowsCGroupSockAddrWindowsSockOps" + _ProgramType_name_2 = "WindowsXDPTestWindowsSample" +) -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} +var ( + _ProgramType_index_0 = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} + _ProgramType_index_1 = [...]uint8{0, 10, 21, 42, 56} + _ProgramType_index_2 = [...]uint8{0, 14, 27} +) func (i ProgramType) String() string { - if i >= ProgramType(len(_ProgramType_index)-1) { + switch { + case i <= 32: + return _ProgramType_name_0[_ProgramType_index_0[i]:_ProgramType_index_0[i+1]] + case 268435457 <= i && i <= 268435460: + i -= 268435457 + return _ProgramType_name_1[_ProgramType_index_1[i]:_ProgramType_index_1[i+1]] + case 268436454 <= i && i <= 268436455: + i -= 268436454 + return _ProgramType_name_2[_ProgramType_index_2[i]:_ProgramType_index_2[i+1]] + default: return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] } func _() { // An "invalid array index" compiler error signifies that the constant values have changed. diff --git a/src/runtime/vendor/github.com/cilium/ebpf/types_windows.go b/src/runtime/vendor/github.com/cilium/ebpf/types_windows.go new file mode 100644 index 000000000000..0b7e836b0dc2 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/types_windows.go @@ -0,0 +1,57 @@ +package ebpf + +import ( + "fmt" + "os" + + "golang.org/x/sys/windows" + + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/platform" +) + +// WindowsProgramTypeForGUID resolves a GUID to a ProgramType. +// +// The GUID must be in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +// +// Returns an error wrapping [os.ErrNotExist] if the GUID is not recognized.
+func WindowsProgramTypeForGUID(guid string) (ProgramType, error) { + progTypeGUID, err := windows.GUIDFromString(guid) + if err != nil { + return 0, fmt.Errorf("parse GUID: %w", err) + } + + rawProgramType, err := efw.EbpfGetBpfProgramType(progTypeGUID) + if err != nil { + return 0, fmt.Errorf("get program type: %w", err) + } + + if rawProgramType == 0 { + return 0, fmt.Errorf("program type not found for GUID %v: %w", guid, os.ErrNotExist) + } + + return ProgramTypeForPlatform(platform.Windows, rawProgramType) +} + +// WindowsAttachTypeForGUID resolves a GUID to an AttachType. +// +// The GUID must be in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +// +// Returns an error wrapping [os.ErrNotExist] if the GUID is not recognized. +func WindowsAttachTypeForGUID(guid string) (AttachType, error) { + attachTypeGUID, err := windows.GUIDFromString(guid) + if err != nil { + return 0, fmt.Errorf("parse GUID: %w", err) + } + + rawAttachType, err := efw.EbpfGetBpfAttachType(attachTypeGUID) + if err != nil { + return 0, fmt.Errorf("get attach type: %w", err) + } + + if rawAttachType == 0 { + return 0, fmt.Errorf("attach type not found for GUID %v: %w", attachTypeGUID, os.ErrNotExist) + } + + return AttachTypeForPlatform(platform.Windows, rawAttachType) +} diff --git a/src/runtime/vendor/github.com/cilium/ebpf/variable.go b/src/runtime/vendor/github.com/cilium/ebpf/variable.go new file mode 100644 index 000000000000..c6fd55cba1a8 --- /dev/null +++ b/src/runtime/vendor/github.com/cilium/ebpf/variable.go @@ -0,0 +1,270 @@ +package ebpf + +import ( + "fmt" + "io" + "reflect" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sysenc" +) + +// VariableSpec is a convenience wrapper for modifying global variables of a +// CollectionSpec before loading it into the kernel. +// +// All operations on a VariableSpec's underlying MapSpec are performed in the +// host's native endianness. +type VariableSpec struct { + name string + offset uint64 + size uint64 + + m *MapSpec + t *btf.Var +} + +// Set sets the value of the VariableSpec to the provided input using the host's +// native endianness. +func (s *VariableSpec) Set(in any) error { + buf, err := sysenc.Marshal(in, int(s.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", s.name, err) + } + + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice + // to avoid any changes affecting other copies of the MapSpec. + cpy := make([]byte, len(b)) + copy(cpy, b) + + buf.CopyTo(cpy[s.offset : s.offset+s.size]) + + s.m.Contents[0] = MapKV{Key: uint32(0), Value: cpy} + + return nil +} + +// Get writes the value of the VariableSpec to the provided output using the +// host's native endianness.
+func (s *VariableSpec) Get(out any) error { + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + if err := sysenc.Unmarshal(out, b[s.offset:s.offset+s.size]); err != nil { + return fmt.Errorf("unmarshaling value: %w", err) + } + + return nil +} + +// Size returns the size of the variable in bytes. +func (s *VariableSpec) Size() uint64 { + return s.size +} + +// MapName returns the name of the underlying MapSpec. +func (s *VariableSpec) MapName() string { + return s.m.Name +} + +// Offset returns the offset of the variable in the underlying MapSpec. +func (s *VariableSpec) Offset() uint64 { + return s.offset +} + +// Constant returns true if the VariableSpec represents a variable that is +// read-only from the perspective of the BPF program. +func (s *VariableSpec) Constant() bool { + return s.m.readOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. +// +// Returns nil if the original ELF object did not contain BTF information. +func (s *VariableSpec) Type() *btf.Var { + return s.t +} + +func (s *VariableSpec) String() string { + return fmt.Sprintf("%s (type=%v, map=%s, offset=%d, size=%d)", s.name, s.t, s.m.Name, s.offset, s.size) +} + +// copy returns a new VariableSpec with the same values as the original, +// but with a different underlying MapSpec. This is useful when copying a +// CollectionSpec. Returns nil if a MapSpec with the same name is not found. +func (s *VariableSpec) copy(cpy *CollectionSpec) *VariableSpec { + out := &VariableSpec{ + name: s.name, + offset: s.offset, + size: s.size, + } + if s.t != nil { + out.t = btf.Copy(s.t).(*btf.Var) + } + + // Attempt to find a MapSpec with the same name in the copied CollectionSpec. + for _, m := range cpy.Maps { + if m.Name == s.m.Name { + out.m = m + return out + } + } + + return nil +} + +// Variable is a convenience wrapper for modifying global variables of a +// Collection after loading it into the kernel. Operations on a Variable are +// performed using direct memory access, bypassing the BPF map syscall API. +// +// On kernels older than 5.5, most interactions with Variable return +// [ErrNotSupported]. +type Variable struct { + name string + offset uint64 + size uint64 + t *btf.Var + + mm *Memory +} + +func newVariable(name string, offset, size uint64, t *btf.Var, mm *Memory) (*Variable, error) { + if mm != nil { + if int(offset+size) > mm.Size() { + return nil, fmt.Errorf("offset %d(+%d) is out of bounds", offset, size) + } + } + + return &Variable{ + name: name, + offset: offset, + size: size, + t: t, + mm: mm, + }, nil +} + +// Size returns the size of the variable. +func (v *Variable) Size() uint64 { + return v.size +} + +// ReadOnly returns true if the Variable represents a variable that is read-only +// after loading the Collection into the kernel. +// +// On systems without BPF_F_MMAPABLE support, ReadOnly always returns true. +func (v *Variable) ReadOnly() bool { + if v.mm == nil { + return true + } + return v.mm.ReadOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. 
+// +// Returns nil if the original ELF object did not contain BTF information. +func (v *Variable) Type() *btf.Var { + return v.t +} + +func (v *Variable) String() string { + return fmt.Sprintf("%s (type=%v)", v.name, v.t) +} + +// Set the value of the Variable to the provided input. The input must marshal +// to the same length as the size of the Variable. +func (v *Variable) Set(in any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if v.ReadOnly() { + return fmt.Errorf("variable %s: %w", v.name, ErrReadOnly) + } + + if !v.mm.bounds(v.offset, v.size) { + return fmt.Errorf("variable %s: access out of bounds: %w", v.name, io.EOF) + } + + buf, err := sysenc.Marshal(in, int(v.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", v.name, err) + } + + if _, err := v.mm.WriteAt(buf.Bytes(), int64(v.offset)); err != nil { + return fmt.Errorf("writing value to %s: %w", v.name, err) + } + + return nil +} + +// Get writes the value of the Variable to the provided output. The output must +// be a pointer to a value whose size matches the Variable. +func (v *Variable) Get(out any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if !v.mm.bounds(v.offset, v.size) { + return fmt.Errorf("variable %s: access out of bounds: %w", v.name, io.EOF) + } + + if err := sysenc.Unmarshal(out, v.mm.b[v.offset:v.offset+v.size]); err != nil { + return fmt.Errorf("unmarshaling value %s: %w", v.name, err) + } + + return nil +} + +func checkVariable[T any](v *Variable) error { + if v.ReadOnly() { + return ErrReadOnly + } + + t := reflect.TypeFor[T]() + size := uint64(t.Size()) + if t.Kind() == reflect.Uintptr && v.size == 8 { + // uintptr is 8 bytes on 64-bit and 4 on 32-bit. In BPF/BTF, pointers are + // always 8 bytes. For the sake of portability, allow accessing 8-byte BPF + // variables as uintptr on 32-bit systems, since the upper 32 bits of the + // pointer should be zero anyway. + return nil + } + if v.size != size { + return fmt.Errorf("can't create %d-byte accessor to %d-byte variable: %w", size, v.size, ErrInvalidType) + } + + return nil +} + +// VariablePointer returns a pointer to a variable of type T backed by memory +// shared with the BPF program. Requires building the Go application with -tags +// ebpf_unsafe_memory_experiment. +// +// T must contain only fixed-size, non-Go-pointer types: bools, floats, +// (u)int[8-64], arrays, and structs containing them. Structs must embed +// [structs.HostLayout]. [ErrInvalidType] is returned if T is not a valid type. +func VariablePointer[T comparable](v *Variable) (*T, error) { + if err := checkVariable[T](v); err != nil { + return nil, fmt.Errorf("variable pointer %s: %w", v.name, err) + } + return memoryPointer[T](v.mm, v.offset) +} diff --git a/src/runtime/vendor/golang.org/x/exp/LICENSE b/src/runtime/vendor/golang.org/x/exp/LICENSE deleted file mode 100644 index 2a7cf70da6e4..000000000000 --- a/src/runtime/vendor/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/runtime/vendor/golang.org/x/exp/PATENTS b/src/runtime/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 733099041f84..000000000000 --- a/src/runtime/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/src/runtime/vendor/golang.org/x/exp/constraints/constraints.go b/src/runtime/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 9d260bab19ac..000000000000 --- a/src/runtime/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -import "cmp" - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. 
-type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -// -// This type is redundant since Go 1.21 introduced [cmp.Ordered]. -// -//go:fix inline -type Ordered = cmp.Ordered diff --git a/src/runtime/vendor/golang.org/x/net/http2/http2.go b/src/runtime/vendor/golang.org/x/net/http2/http2.go index 6c18ea230be0..ea5ae629fde0 100644 --- a/src/runtime/vendor/golang.org/x/net/http2/http2.go +++ b/src/runtime/vendor/golang.org/x/net/http2/http2.go @@ -11,8 +11,6 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
package http2 // import "golang.org/x/net/http2" import ( diff --git a/src/runtime/vendor/golang.org/x/sys/unix/mkerrors.sh b/src/runtime/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c3122..d1c8b2640ebd 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/src/runtime/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -349,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' diff --git a/src/runtime/vendor/golang.org/x/sys/unix/syscall_darwin.go b/src/runtime/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad3bf9..7838ca5db200 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id 
int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9e7a6c5a4fca..b6db27d937c8 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -328,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -492,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -528,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -555,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -844,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2025-01-17)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x31 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -937,9 +942,6 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 ETHTOOL_FAMILY_NAME = "ethtool" @@ -1213,6 +1215,7 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 @@ -1231,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1255,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1274,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1582,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1633,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1695,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1817,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + 
LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2493,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2652,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2732,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2982,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -3336,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xf + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3406,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3530,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3574,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3688,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a8c421e29b5e..1c37f9fbc45c 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9a88d18130fe..6f54d34aefc9 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 
SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 7cb6a867efd5..783ec5c126f0 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d0ecd2c583bf..ca83d3ba162c 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 7a2940ae0a35..607e611c0cbe 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index d14ca8f2ecf7..b9cb5bd3c09e 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 2da1bac1e3da..65b078a6382e 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 28727514b5f2..5298a3033d0a 100644 --- 
a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7f287b54b5dd..7bc557c87618 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 7e5f9e6aa8de..152399bb04a1 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 37c87952fcb6..1a1ce2409cf0 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5220133613a9..4231a1fb5787 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 4bfe2b5b6e6b..21c0e9526656 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 
0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index e3cffb869a3d..f00d1cd7cf48 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c219c8db3960..bc8d539e6af7 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306ae..aca56ee49474 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e95..2ea1ef58c3ec 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e502974458..d22c8af31968 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51bb3..5ee264ae9743 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18e1..f9f03ebf5fa1 100644 --- 
a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b0e..87c2118e8496 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278f2..391ad102fb68 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e675..5656157757a9 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc2207..0482b52e3c38 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb140..71806f08f387 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b44636502561..e35a7105829d 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c18816..2aea476705e1 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go 
b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 840539169878..6c9bb4e56078 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6cf..680bc9915a31 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d45..620f271052f9 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8bcac2835f6a..cd236443f645 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -115,7 +115,9 @@ type Statx_t struct { Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 Dio_read_offset_align uint32 - _ [9]uint64 + Atomic_write_unit_max_opt uint32 + _ [1]uint32 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -2317,6 +2320,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2597,8 +2605,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -4044,7 +4052,7 @@ const ( ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 ETHTOOL_A_TSINFO_STATS = 0x6 ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 - ETHTOOL_A_TSINFO_MAX = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4130,6 +4138,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4780,7 +4801,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x150 + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 @@ -5414,7 +5435,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf 
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5530,7 +5551,7 @@ const ( NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 62db85f6cb72..485f2d3a1bc8 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,19 +282,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -330,17 +324,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -348,10 +336,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 7d89d648d9aa..ecbd1ad8bc54 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -300,16 +300,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -344,27 +338,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 
Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 9c0b39eec761..02f0463a44b2 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,19 +273,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -321,17 +315,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -339,10 +327,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index de9c7ff36cfe..6f4d400d2417 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -279,16 +279,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -323,27 +317,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 
Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2336bd2bf099..cd532cfa5589 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -280,16 +280,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -324,27 +318,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 4711f0be16d5..413362085175 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { 
Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ab99a34b9965..eaa37eb718e2 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 04c9866e3cf8..98ae6a1e4ac4 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ 
-326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 60aa69f618c7..cae1961594d2 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index cb4fad785d13..6ce3b4e02830 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,19 +285,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 
Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,17 +327,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -351,10 +339,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 60272cfce86b..c7429c6a1461 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 3f5b91bc0d50..4bf4baf4cac5 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - 
Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 51550f15a637..e9709d70afbd 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -307,16 +307,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -351,27 +345,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 3239e50e0e22..fb44268ca7da 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -302,16 +302,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - 
Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -346,27 +340,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index faf20027831a..9c38265c74af 100644 --- a/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/src/runtime/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -284,16 +284,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -328,27 +322,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/src/runtime/vendor/modules.txt b/src/runtime/vendor/modules.txt index 68591799e979..731d84000259 100644 --- a/src/runtime/vendor/modules.txt +++ b/src/runtime/vendor/modules.txt @@ -64,16 +64,20 @@ github.com/blang/semver/v4 # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cilium/ebpf v0.16.0 -## 
explicit; go 1.21 +# github.com/cilium/ebpf v0.19.0 +## explicit; go 1.23.0 github.com/cilium/ebpf github.com/cilium/ebpf/asm github.com/cilium/ebpf/btf github.com/cilium/ebpf/internal +github.com/cilium/ebpf/internal/efw github.com/cilium/ebpf/internal/kallsyms github.com/cilium/ebpf/internal/kconfig +github.com/cilium/ebpf/internal/linux +github.com/cilium/ebpf/internal/platform github.com/cilium/ebpf/internal/sys github.com/cilium/ebpf/internal/sysenc +github.com/cilium/ebpf/internal/testutils/testmain github.com/cilium/ebpf/internal/tracefs github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link @@ -580,13 +584,10 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b -## explicit; go 1.24.0 -golang.org/x/exp/constraints # golang.org/x/mod v0.27.0 ## explicit; go 1.23.0 golang.org/x/mod/semver -# golang.org/x/net v0.42.0 +# golang.org/x/net v0.43.0 ## explicit; go 1.23.0 golang.org/x/net/bpf golang.org/x/net/http/httpguts @@ -604,18 +605,20 @@ golang.org/x/oauth2/internal ## explicit; go 1.23.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.34.0 +# golang.org/x/sys v0.35.0 ## explicit; go 1.23.0 golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.27.0 +# golang.org/x/text v0.28.0 ## explicit; go 1.23.0 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm +# golang.org/x/tools v0.36.0 +## explicit; go 1.23.0 # google.golang.org/genproto v0.0.0-20250826171959-ef028d996bc1 ## explicit; go 1.24.0 google.golang.org/genproto/protobuf/field_mask