[feature] Inherit resource limits from cgroups (#1336)
When GTS runs in a container runtime that configures CPU or memory limits, or under an init system that uses cgroups to impose such limits, the values the Go runtime sees for GOMAXPROCS and GOMEMLIMIT are still based on the host's resources, not on the cgroup. At least for the throttling middlewares, which use GOMAXPROCS to size their queues, this can result in GTS running with values that are too big for the resources actually available to it. This change introduces two dependencies that pick up resource constraints from the current cgroup and tune the Go runtime accordingly. The result should be appropriately sized queues and, in general, more predictable performance. Both dependencies are a no-op on non-Linux systems, or when running in a cgroup that doesn't limit CPU or memory. Automatic tuning of GOMEMLIMIT can be disabled by setting GOMEMLIMIT explicitly or by setting AUTOMEMLIMIT=off; automatic tuning of GOMAXPROCS can likewise be overridden by setting GOMAXPROCS yourself.
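The wiring needed for this is small, as the diff below shows: a blank import pulls in automemlimit's init-time GOMEMLIMIT tuning, and a single maxprocs.Set call aligns GOMAXPROCS with the cgroup CPU quota. The following is a rough, self-contained sketch of that pattern only; the package and main function here are illustrative, and the actual GTS change lives in cmd/gotosocial/action/server/server.go as shown further down.

```go
// Minimal sketch of the cgroup-aware startup pattern this commit applies.
package main

import (
	"fmt"
	"log"

	// Blank import for its side effect: automemlimit's init() reads the
	// cgroup memory limit and sets GOMEMLIMIT (to 90% of it by default),
	// unless GOMEMLIMIT is already set or AUTOMEMLIMIT=off.
	_ "github.com/KimMachineGun/automemlimit"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Align GOMAXPROCS with the cgroup CPU quota before anything sizes
	// queues from it. This is a no-op when there is no cgroup CPU limit,
	// and it is skipped when GOMAXPROCS is set in the environment.
	if _, err := maxprocs.Set(maxprocs.Logger(log.Printf)); err != nil {
		log.Fatal(fmt.Errorf("failed to set CPU limits from cgroup: %s", err))
	}

	// ...continue normal server startup here.
}
```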
parent 627b8eeae6
commit acc333c40b
@@ -33,6 +33,7 @@ import (
 	apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
 	"github.com/superseriousbusiness/gotosocial/internal/middleware"
+	"go.uber.org/automaxprocs/maxprocs"

 	"github.com/superseriousbusiness/gotosocial/internal/concurrency"
 	"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -54,10 +55,18 @@ import (
 	"github.com/superseriousbusiness/gotosocial/internal/transport"
 	"github.com/superseriousbusiness/gotosocial/internal/typeutils"
 	"github.com/superseriousbusiness/gotosocial/internal/web"
+
+	// Inherit memory limit if set from cgroup
+	_ "github.com/KimMachineGun/automemlimit"
 )

 // Start creates and starts a gotosocial server
 var Start action.GTSAction = func(ctx context.Context) error {
+	_, err := maxprocs.Set(maxprocs.Logger(log.Errorf))
+	if err != nil {
+		return fmt.Errorf("failed to set CPU limits from cgroup: %s", err)
+	}
+
 	var state state.State

 	// Initialize caches
go.mod (9 changed lines)
@@ -13,6 +13,7 @@ require (
 	codeberg.org/gruf/go-mutexes v1.1.5
 	codeberg.org/gruf/go-runners v1.4.0
 	codeberg.org/gruf/go-store/v2 v2.2.1
+	github.com/KimMachineGun/automemlimit v0.2.4
 	github.com/abema/go-mp4 v0.9.0
 	github.com/buckket/go-blurhash v1.1.0
 	github.com/coreos/go-oidc/v3 v3.5.0
@@ -49,6 +50,7 @@ require (
 	github.com/uptrace/bun/dialect/sqlitedialect v1.1.9
 	github.com/wagslane/go-password-validator v0.3.0
 	github.com/yuin/goldmark v1.5.3
+	go.uber.org/automaxprocs v1.5.1
 	golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90
 	golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
 	golang.org/x/image v0.3.0
@@ -75,7 +77,11 @@ require (
 	codeberg.org/gruf/go-pools v1.1.0 // indirect
 	codeberg.org/gruf/go-sched v1.2.0 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
+	github.com/cilium/ebpf v0.4.0 // indirect
+	github.com/containerd/cgroups v1.0.4 // indirect
+	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/docker/go-units v0.4.0 // indirect
 	github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b // indirect
 	github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413 // indirect
 	github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd // indirect
@@ -91,6 +97,8 @@ require (
 	github.com/go-playground/universal-translator v0.18.0 // indirect
 	github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 // indirect
 	github.com/goccy/go-json v0.9.11 // indirect
+	github.com/godbus/dbus/v5 v5.0.4 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
 	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
@@ -118,6 +126,7 @@ require (
 	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/opencontainers/runtime-spec v1.0.2 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
go.sum (25 changed lines)
@@ -88,6 +88,8 @@ codeberg.org/gruf/go-store/v2 v2.2.1/go.mod h1:pxdyfSzau8fFs1TfZlyRzhDYvZWLaj1sX
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/KimMachineGun/automemlimit v0.2.4 h1:GBty8TK8k0aJer1Pq5/3Vdt2ef+YpLhcqNo+PSD5CoI=
+github.com/KimMachineGun/automemlimit v0.2.4/go.mod h1:38QAnnnNhnFuAIW3+aPlaVUHqzE9buJYZK3m/jsra8E=
 github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/abema/go-mp4 v0.9.0 h1:WFkzn0J8uYTQ2MIWfgCaFHRB3VDkird5JncIjuuKjGI=
@@ -105,6 +107,8 @@ github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgk
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.4.0 h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -112,10 +116,14 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
 github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ=
 github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw=
 github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cornelk/hashmap v1.0.8 h1:nv0AWgw02n+iDcawr5It4CjQIAcdMMKRrs10HOJYlrc=
 github.com/cornelk/hashmap v1.0.8/go.mod h1:RfZb7JO3RviW/rT6emczVuC/oxpdz4UsSB2LJSclR1k=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -127,6 +135,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
 github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
 github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/dsoprea/go-exif/v2 v2.0.0-20200321225314-640175a69fe4/go.mod h1:Lm2lMM2zx8p4a34ZemkaUV95AnMl4ZvLbCUbwOvLC2E=
 github.com/dsoprea/go-exif/v3 v3.0.0-20200717053412-08f1b6708903/go.mod h1:0nsO1ce0mh5czxGeLo4+OCZ/C6Eo6ZlMWsz7rH/Gxv8=
 github.com/dsoprea/go-exif/v3 v3.0.0-20210428042052-dca55bf8ca15/go.mod h1:cg5SNYKHMmzxsr9X6ZeLh/nfBRHHp5PngtEPcujONtk=
@@ -156,6 +166,7 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -207,8 +218,12 @@ github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850/go.mod h1:aUCEOzz
 github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
 github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
 github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
 github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
 github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
@@ -371,6 +386,7 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
 github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@@ -440,6 +456,8 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
 github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw=
 github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
@@ -454,6 +472,7 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc=
 github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg=
@@ -609,6 +628,9 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
+go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@@ -789,6 +811,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -870,6 +893,7 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@@ -878,6 +902,7 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
@@ -0,0 +1,15 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022 Geon Kim

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,42 @@
# automemlimit

[![Go Reference](https://pkg.go.dev/badge/github.com/KimMachineGun/automemlimit.svg)](https://pkg.go.dev/github.com/KimMachineGun/automemlimit)
[![Go Report Card](https://goreportcard.com/badge/github.com/KimMachineGun/automemlimit)](https://goreportcard.com/report/github.com/KimMachineGun/automemlimit)
[![Test](https://github.com/KimMachineGun/automemlimit/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/KimMachineGun/automemlimit/actions/workflows/test.yml)

Automatically set `GOMEMLIMIT` to match Linux [cgroups(7)](https://man7.org/linux/man-pages/man7/cgroups.7.html) memory limit.

See more details about `GOMEMLIMIT` [here](https://tip.golang.org/doc/gc-guide#Memory_limit).

## Installation

```shell
go get github.com/KimMachineGun/automemlimit@latest
```

## Usage

```go
package main

// By default, it sets `GOMEMLIMIT` to 90% of cgroup's memory limit.
// You can find more details of its behavior from the doc comment of memlimit.SetGoMemLimitWithEnv.
import _ "github.com/KimMachineGun/automemlimit"
```

or

```go
package main

import "github.com/KimMachineGun/automemlimit/memlimit"

func init() {
	memlimit.SetGoMemLimitWithEnv()
	memlimit.SetGoMemLimit(0.9)
	memlimit.SetGoMemLimitWithProvider(memlimit.Limit(1024*1024), 0.9)
	memlimit.SetGoMemLimitWithProvider(memlimit.FromCgroup, 0.9)
	memlimit.SetGoMemLimitWithProvider(memlimit.FromCgroupV1, 0.9)
	memlimit.SetGoMemLimitWithProvider(memlimit.FromCgroupV2, 0.9)
}
```
@@ -0,0 +1,9 @@
package automemlimit

import (
	"github.com/KimMachineGun/automemlimit/memlimit"
)

func init() {
	memlimit.SetGoMemLimitWithEnv()
}
@@ -0,0 +1,63 @@
//go:build linux
// +build linux

package memlimit

import (
	"github.com/containerd/cgroups"
	v2 "github.com/containerd/cgroups/v2"
)

const (
	cgroupMountPoint = "/sys/fs/cgroup"
)

// FromCgroup returns the memory limit based on the cgroups version on this system.
func FromCgroup() (uint64, error) {
	switch cgroups.Mode() {
	case cgroups.Legacy:
		return FromCgroupV1()
	case cgroups.Hybrid, cgroups.Unified:
		return FromCgroupV2()
	}
	return 0, ErrNoCgroup
}

// FromCgroupV1 returns the memory limit from the cgroup v1.
func FromCgroupV1() (uint64, error) {
	cg, err := cgroups.Load(cgroups.SingleSubsystem(cgroups.V1, cgroups.Memory), cgroups.RootPath)
	if err != nil {
		return 0, err
	}

	metrics, err := cg.Stat(cgroups.IgnoreNotExist)
	if err != nil {
		return 0, err
	} else if metrics.Memory == nil {
		return 0, ErrNoLimit
	}

	return metrics.Memory.HierarchicalMemoryLimit, nil
}

// FromCgroupV2 returns the memory limit from the cgroup v2.
func FromCgroupV2() (uint64, error) {
	path, err := v2.NestedGroupPath("")
	if err != nil {
		return 0, err
	}

	m, err := v2.LoadManager(cgroupMountPoint, path)
	if err != nil {
		return 0, err
	}

	stats, err := m.Stat()
	if err != nil {
		return 0, err
	} else if stats.Memory == nil {
		return 0, ErrNoLimit
	}

	return stats.Memory.UsageLimit, nil
}
vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups_unsupported.go (16 lines, generated, vendored, new file)

@@ -0,0 +1,16 @@
//go:build !linux
// +build !linux

package memlimit

func FromCgroup() (uint64, error) {
	return 0, ErrCgroupsNotSupported
}

func FromCgroupV1() (uint64, error) {
	return 0, ErrCgroupsNotSupported
}

func FromCgroupV2() (uint64, error) {
	return 0, ErrCgroupsNotSupported
}
vendor/github.com/KimMachineGun/automemlimit/memlimit/memlimit.go (105 lines, generated, vendored, new file)

@@ -0,0 +1,105 @@
package memlimit

import (
	"errors"
	"io"
	"log"
	"math"
	"os"
	"runtime/debug"
	"strconv"
)

const (
	envGOMEMLIMIT         = "GOMEMLIMIT"
	envAUTOMEMLIMIT       = "AUTOMEMLIMIT"
	envAUTOMEMLIMIT_DEBUG = "AUTOMEMLIMIT_DEBUG"

	defaultAUTOMEMLIMIT = 0.9
)

var (
	// ErrNoLimit is returned when the memory limit is not set.
	ErrNoLimit = errors.New("memory is not limited")
	// ErrNoCgroup is returned when the process is not in cgroup.
	ErrNoCgroup = errors.New("process is not in cgroup")
	// ErrCgroupsNotSupported is returned when the system does not support cgroups.
	ErrCgroupsNotSupported = errors.New("cgroups is not supported on this system")

	logger = log.New(io.Discard, "", log.LstdFlags)
)

// SetGoMemLimitWithEnv sets GOMEMLIMIT with the value from the environment variable.
// You can configure how much memory of the cgroup's memory limit to set as GOMEMLIMIT
// through AUTOMEMLIMIT in the half-open range (0.0,1.0].
//
// If AUTOMEMLIMIT is not set, it defaults to 0.9. (10% is the headroom for memory sources the Go runtime is unaware of.)
// If GOMEMLIMIT is already set or AUTOMEMLIMIT=off, this function does nothing.
func SetGoMemLimitWithEnv() {
	if os.Getenv(envAUTOMEMLIMIT_DEBUG) == "true" {
		logger = log.Default()
	}

	if val, ok := os.LookupEnv(envGOMEMLIMIT); ok {
		logger.Printf("GOMEMLIMIT is set already, skipping: %s\n", val)
		return
	}

	ratio := defaultAUTOMEMLIMIT
	if val, ok := os.LookupEnv(envAUTOMEMLIMIT); ok {
		if val == "off" {
			logger.Printf("AUTOMEMLIMIT is set to off, skipping\n")
			return
		}
		_ratio, err := strconv.ParseFloat(val, 64)
		if err != nil {
			logger.Printf("cannot parse AUTOMEMLIMIT: %s\n", val)
			return
		}
		ratio = _ratio
	}
	if ratio <= 0 || ratio > 1 {
		logger.Printf("invalid AUTOMEMLIMIT: %f\n", ratio)
		return
	}

	limit, err := SetGoMemLimit(ratio)
	if err != nil {
		logger.Printf("failed to set GOMEMLIMIT: %v\n", err)
		return
	}

	logger.Printf("GOMEMLIMIT=%d\n", limit)
}

// SetGoMemLimit sets GOMEMLIMIT with the value from the cgroup's memory limit and given ratio.
func SetGoMemLimit(ratio float64) (int64, error) {
	return SetGoMemLimitWithProvider(FromCgroup, ratio)
}

// Provider is a function that returns the memory limit.
type Provider func() (uint64, error)

// SetGoMemLimitWithProvider sets GOMEMLIMIT with the value from the given provider and ratio.
func SetGoMemLimitWithProvider(provider Provider, ratio float64) (int64, error) {
	limit, err := provider()
	if err != nil {
		return 0, err
	}
	goMemLimit := cappedFloat2Int(float64(limit) * ratio)
	debug.SetMemoryLimit(goMemLimit)
	return goMemLimit, nil
}

func cappedFloat2Int(f float64) int64 {
	if f > math.MaxInt64 {
		return math.MaxInt64
	}
	return int64(f)
}

// Limit is a helper Provider function that returns the given limit.
func Limit(limit uint64) func() (uint64, error) {
	return func() (uint64, error) {
		return limit, nil
	}
}
@@ -0,0 +1,17 @@
---
Language: Cpp
BasedOnStyle: LLVM
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: true
AlignEscapedNewlines: DontAlign
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: false
BreakBeforeBraces: Attach
IndentWidth: 4
KeepEmptyLinesAtTheStartOfBlocks: false
TabWidth: 4
UseTab: ForContinuationAndIndentation
ColumnLimit: 1000
...
@@ -0,0 +1,13 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
*.o

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
@@ -0,0 +1,80 @@
Architecture of the library
===

ELF -> Specifications -> Objects -> Links

ELF
---

BPF is usually produced by using Clang to compile a subset of C. Clang outputs
an ELF file which contains program byte code (aka BPF), but also metadata for
maps used by the program. The metadata follows the conventions set by libbpf
shipped with the kernel. Certain ELF sections have special meaning
and contain structures defined by libbpf. Newer versions of clang emit
additional metadata in BPF Type Format (aka BTF).

The library aims to be compatible with libbpf so that moving from a C toolchain
to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
is tested against the Linux selftests and avoids introducing custom behaviour
if possible.

The output of the ELF reader is a `CollectionSpec` which encodes
all of the information contained in the ELF in a form that is easy to work with
in Go.

### BTF

The BPF Type Format describes more than just the types used by a BPF program. It
includes debug aids like which source line corresponds to which instructions and
what global variables are used.

[BTF parsing](internal/btf/) lives in a separate internal package since exposing
it would mean an additional maintenance burden, and because the API still
has sharp corners. The most important concept is the `btf.Type` interface, which
also describes things that aren't really types like `.rodata` or `.bss` sections.
`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if
one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as
we write more code that deals with it.

Specifications
---

`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel
objects and contain everything necessary to execute the relevant `bpf(2)`
syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to
modify clang-compiled BPF code, for example to rewrite constants. At the same
time the [asm](asm/) package provides an assembler that can be used to generate
`ProgramSpec` on the fly.

Creating a spec should never require any privileges or be restricted in any way,
for example by only allowing programs in native endianness. This ensures that
the library stays flexible.

Objects
---

`Program` and `Map` are the result of loading specs into the kernel. Sometimes
loading a spec will fail because the kernel is too old, or a feature is not
enabled. There are multiple ways the library deals with that:

* Fallback: older kernels don't allowing naming programs and maps. The library
  automatically detects support for names, and omits them during load if
  necessary. This works since name is primarily a debug aid.

* Sentinel error: sometimes it's possible to detect that a feature isn't available.
  In that case the library will return an error wrapping `ErrNotSupported`.
  This is also useful to skip tests that can't run on the current kernel.

Once program and map objects are loaded they expose the kernel's low-level API,
e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
wrappers on top of the low-level API, like `MapIterator`. The low-level API is
useful as an out when our higher-level API doesn't support a particular use case.

Links
---

BPF can be attached to many different points in the kernel and newer BPF hooks
tend to use bpf_link to do so. Older hooks unfortunately use a combination of
syscalls, netlink messages, etc. Adding support for a new link type should not
pull in large dependencies like netlink, so XDP programs or tracepoints are
out of scope.
@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
@@ -0,0 +1,23 @@
# How to contribute

Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
the form of pull requests and issues reporting bugs or suggesting new features
are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
a better understanding for the high-level goals.

New features must be accompanied by tests. Before starting work on any large
feature, please [join](https://cilium.herokuapp.com/) the
[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack to
discuss the design first.

When submitting pull requests, consider writing details about what problem you
are solving and why the proposed approach solves that problem in commit messages
and/or pull request description to help future library users and maintainers to
reason about the proposed changes.

## Running the tests

Many of the tests require privileges to set resource limits and load eBPF code.
The easiest way to obtain these is to run the tests with `sudo`:

    sudo go test ./...
@@ -0,0 +1,23 @@
MIT License

Copyright (c) 2017 Nathan Sweet
Copyright (c) 2018, 2019 Cloudflare
Copyright (c) 2019 Authors of Cilium

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,67 @@
# The development version of clang is distributed as the 'clang' binary,
# while stable/released versions have a version number attached.
# Pin the default clang to a stable version.
CLANG ?= clang-11
CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS)

# Obtain an absolute path to the directory of the Makefile.
# Assume the Makefile is in the root of the repository.
REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UIDGID := $(shell stat -c '%u:%g' ${REPODIR})

IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)

# clang <8 doesn't tag relocs properly (STT_NOTYPE)
# clang 9 is the first version emitting BTF
TARGETS := \
	testdata/loader-clang-7 \
	testdata/loader-clang-9 \
	testdata/loader-clang-11 \
	testdata/invalid_map \
	testdata/raw_tracepoint \
	testdata/invalid_map_static \
	testdata/initialized_btf_map \
	testdata/strings \
	internal/btf/testdata/relocs

.PHONY: all clean docker-all docker-shell

.DEFAULT_TARGET = docker-all

# Build all ELF binaries using a Dockerized LLVM toolchain.
docker-all:
	docker run --rm --user "${UIDGID}" \
		-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
		"${IMAGE}:${VERSION}" \
		make all

# (debug) Drop the user into a shell inside the Docker container as root.
docker-shell:
	docker run --rm -ti \
		-v "${REPODIR}":/ebpf -w /ebpf \
		"${IMAGE}:${VERSION}"

clean:
	-$(RM) testdata/*.elf
	-$(RM) internal/btf/testdata/*.elf

all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS))

testdata/loader-%-el.elf: testdata/loader.c
	$* $(CFLAGS) -mlittle-endian -c $< -o $@

testdata/loader-%-eb.elf: testdata/loader.c
	$* $(CFLAGS) -mbig-endian -c $< -o $@

%-el.elf: %.c
	$(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@

%-eb.elf : %.c
	$(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@

# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf
.PHONY: vmlinux-btf
vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz
internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX)
	objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@"
@@ -0,0 +1,54 @@
# eBPF

[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)

eBPF is a pure Go library that provides utilities for loading, compiling, and
debugging eBPF programs. It has minimal external dependencies and is intended to
be used in long running processes.

* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
  assembler
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
  to various hooks
* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
  `PERF_EVENT_ARRAY`
* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
  embedding eBPF in Go

The library is maintained by [Cloudflare](https://www.cloudflare.com) and
[Cilium](https://www.cilium.io). Feel free to
[join](https://cilium.herokuapp.com/) the
[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack.

## Current status

The package is production ready, but **the API is explicitly unstable right
now**. Expect to update your code if you want to follow along.

## Requirements

* A version of Go that is [supported by
  upstream](https://golang.org/doc/devel/release.html#policy)
* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested)

## Useful resources

* [eBPF.io](https://ebpf.io) (recommended)
* [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide)
  (recommended)
* [Linux documentation on
  BPF](https://www.kernel.org/doc/html/latest/networking/filter.html)
* [eBPF features by Linux
  version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md)

## Regenerating Testdata

Run `make` in the root of this repository to rebuild testdata in all
subpackages. This requires Docker, as it relies on a standardized build
environment to keep the build output stable.

The toolchain image build files are kept in [testdata/docker/](testdata/docker/).

## License

MIT
|
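The package list above maps onto a small API surface. Below is a minimal sketch of assembling a trivial program with the `asm` package and loading it through the top-level `ebpf` package; `ebpf.ProgramSpec`, `ebpf.NewProgram`, and `ebpf.SocketFilter` are assumed from the library's public API and are not part of this diff. Actually loading the program requires a Linux kernel and sufficient privileges.

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	// A program that immediately returns 0.
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 0),
		asm.Return(),
	}

	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type:         ebpf.SocketFilter,
		Instructions: insns,
		License:      "MIT",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()
}
```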
@ -0,0 +1,149 @@
|
||||||
|
package asm
|
||||||
|
|
||||||
|
//go:generate stringer -output alu_string.go -type=Source,Endianness,ALUOp
|
||||||
|
|
||||||
|
// Source of ALU / ALU64 / Branch operations
|
||||||
|
//
|
||||||
|
// msb lsb
|
||||||
|
// +----+-+---+
|
||||||
|
// |op |S|cls|
|
||||||
|
// +----+-+---+
|
||||||
|
type Source uint8
|
||||||
|
|
||||||
|
const sourceMask OpCode = 0x08
|
||||||
|
|
||||||
|
// Source bitmask
|
||||||
|
const (
|
||||||
|
// InvalidSource is returned by getters when invoked
|
||||||
|
// on non ALU / branch OpCodes.
|
||||||
|
InvalidSource Source = 0xff
|
||||||
|
// ImmSource src is from constant
|
||||||
|
ImmSource Source = 0x00
|
||||||
|
// RegSource src is from register
|
||||||
|
RegSource Source = 0x08
|
||||||
|
)
|
||||||
|
|
||||||
|
// The Endianness of a byte swap instruction.
|
||||||
|
type Endianness uint8
|
||||||
|
|
||||||
|
const endianMask = sourceMask
|
||||||
|
|
||||||
|
// Endian flags
|
||||||
|
const (
|
||||||
|
InvalidEndian Endianness = 0xff
|
||||||
|
// Convert to little endian
|
||||||
|
LE Endianness = 0x00
|
||||||
|
// Convert to big endian
|
||||||
|
BE Endianness = 0x08
|
||||||
|
)
|
||||||
|
|
||||||
|
// ALUOp are ALU / ALU64 operations
|
||||||
|
//
|
||||||
|
// msb lsb
|
||||||
|
// +----+-+---+
|
||||||
|
// |OP |s|cls|
|
||||||
|
// +----+-+---+
|
||||||
|
type ALUOp uint8
|
||||||
|
|
||||||
|
const aluMask OpCode = 0xf0
|
||||||
|
|
||||||
|
const (
|
||||||
|
// InvalidALUOp is returned by getters when invoked
|
||||||
|
// on non ALU OpCodes
|
||||||
|
InvalidALUOp ALUOp = 0xff
|
||||||
|
// Add - addition
|
||||||
|
Add ALUOp = 0x00
|
||||||
|
// Sub - subtraction
|
||||||
|
Sub ALUOp = 0x10
|
||||||
|
// Mul - multiplication
|
||||||
|
Mul ALUOp = 0x20
|
||||||
|
// Div - division
|
||||||
|
Div ALUOp = 0x30
|
||||||
|
// Or - bitwise or
|
||||||
|
Or ALUOp = 0x40
|
||||||
|
// And - bitwise and
|
||||||
|
And ALUOp = 0x50
|
||||||
|
// LSh - bitwise shift left
|
||||||
|
LSh ALUOp = 0x60
|
||||||
|
// RSh - bitwise shift right
|
||||||
|
RSh ALUOp = 0x70
|
||||||
|
// Neg - negation (dst = -dst)
|
||||||
|
Neg ALUOp = 0x80
|
||||||
|
// Mod - modulo
|
||||||
|
Mod ALUOp = 0x90
|
||||||
|
// Xor - bitwise xor
|
||||||
|
Xor ALUOp = 0xa0
|
||||||
|
// Mov - move value from one place to another
|
||||||
|
Mov ALUOp = 0xb0
|
||||||
|
// ArSh - arithmetic shift
|
||||||
|
ArSh ALUOp = 0xc0
|
||||||
|
// Swap - endian conversions
|
||||||
|
Swap ALUOp = 0xd0
|
||||||
|
)
|
||||||
|
|
||||||
|
// HostTo converts from host to another endianness.
|
||||||
|
func HostTo(endian Endianness, dst Register, size Size) Instruction {
|
||||||
|
var imm int64
|
||||||
|
switch size {
|
||||||
|
case Half:
|
||||||
|
imm = 16
|
||||||
|
case Word:
|
||||||
|
imm = 32
|
||||||
|
case DWord:
|
||||||
|
imm = 64
|
||||||
|
default:
|
||||||
|
return Instruction{OpCode: InvalidOpCode}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Instruction{
|
||||||
|
OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)),
|
||||||
|
Dst: dst,
|
||||||
|
Constant: imm,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Op returns the OpCode for an ALU operation with a given source.
|
||||||
|
func (op ALUOp) Op(source Source) OpCode {
|
||||||
|
return OpCode(ALU64Class).SetALUOp(op).SetSource(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reg emits `dst (op) src`.
|
||||||
|
func (op ALUOp) Reg(dst, src Register) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: op.Op(RegSource),
|
||||||
|
Dst: dst,
|
||||||
|
Src: src,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Imm emits `dst (op) value`.
|
||||||
|
func (op ALUOp) Imm(dst Register, value int32) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: op.Op(ImmSource),
|
||||||
|
Dst: dst,
|
||||||
|
Constant: int64(value),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Op32 returns the OpCode for a 32-bit ALU operation with a given source.
|
||||||
|
func (op ALUOp) Op32(source Source) OpCode {
|
||||||
|
return OpCode(ALUClass).SetALUOp(op).SetSource(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst.
|
||||||
|
func (op ALUOp) Reg32(dst, src Register) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: op.Op32(RegSource),
|
||||||
|
Dst: dst,
|
||||||
|
Src: src,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst.
|
||||||
|
func (op ALUOp) Imm32(dst Register, value int32) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: op.Op32(ImmSource),
|
||||||
|
Dst: dst,
|
||||||
|
Constant: int64(value),
|
||||||
|
}
|
||||||
|
}
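As a rough illustration of the emitters above (a sketch, not part of this diff): `Mov.Reg`, `Add.Imm32`, and `HostTo` compose into an `asm.Instructions` slice. The register constants (`asm.R0`, `asm.R1`) come from the package's register definitions, which are not shown here.

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.Mov.Reg(asm.R0, asm.R1),          // r0 = r1 (64-bit move)
		asm.Add.Imm32(asm.R0, 42),            // w0 += 42 (32-bit, upper half of r0 zeroed)
		asm.HostTo(asm.BE, asm.R0, asm.Half), // byte-swap the low 16 bits of r0 to big endian
	}
	fmt.Printf("%v\n", insns)
}
```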
|
|
@ -0,0 +1,107 @@
|
||||||
|
// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package asm
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[InvalidSource-255]
|
||||||
|
_ = x[ImmSource-0]
|
||||||
|
_ = x[RegSource-8]
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
_Source_name_0 = "ImmSource"
|
||||||
|
_Source_name_1 = "RegSource"
|
||||||
|
_Source_name_2 = "InvalidSource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (i Source) String() string {
|
||||||
|
switch {
|
||||||
|
case i == 0:
|
||||||
|
return _Source_name_0
|
||||||
|
case i == 8:
|
||||||
|
return _Source_name_1
|
||||||
|
case i == 255:
|
||||||
|
return _Source_name_2
|
||||||
|
default:
|
||||||
|
return "Source(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[InvalidEndian-255]
|
||||||
|
_ = x[LE-0]
|
||||||
|
_ = x[BE-8]
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
_Endianness_name_0 = "LE"
|
||||||
|
_Endianness_name_1 = "BE"
|
||||||
|
_Endianness_name_2 = "InvalidEndian"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (i Endianness) String() string {
|
||||||
|
switch {
|
||||||
|
case i == 0:
|
||||||
|
return _Endianness_name_0
|
||||||
|
case i == 8:
|
||||||
|
return _Endianness_name_1
|
||||||
|
case i == 255:
|
||||||
|
return _Endianness_name_2
|
||||||
|
default:
|
||||||
|
return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[InvalidALUOp-255]
|
||||||
|
_ = x[Add-0]
|
||||||
|
_ = x[Sub-16]
|
||||||
|
_ = x[Mul-32]
|
||||||
|
_ = x[Div-48]
|
||||||
|
_ = x[Or-64]
|
||||||
|
_ = x[And-80]
|
||||||
|
_ = x[LSh-96]
|
||||||
|
_ = x[RSh-112]
|
||||||
|
_ = x[Neg-128]
|
||||||
|
_ = x[Mod-144]
|
||||||
|
_ = x[Xor-160]
|
||||||
|
_ = x[Mov-176]
|
||||||
|
_ = x[ArSh-192]
|
||||||
|
_ = x[Swap-208]
|
||||||
|
}
|
||||||
|
|
||||||
|
const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp"
|
||||||
|
|
||||||
|
var _ALUOp_map = map[ALUOp]string{
|
||||||
|
0: _ALUOp_name[0:3],
|
||||||
|
16: _ALUOp_name[3:6],
|
||||||
|
32: _ALUOp_name[6:9],
|
||||||
|
48: _ALUOp_name[9:12],
|
||||||
|
64: _ALUOp_name[12:14],
|
||||||
|
80: _ALUOp_name[14:17],
|
||||||
|
96: _ALUOp_name[17:20],
|
||||||
|
112: _ALUOp_name[20:23],
|
||||||
|
128: _ALUOp_name[23:26],
|
||||||
|
144: _ALUOp_name[26:29],
|
||||||
|
160: _ALUOp_name[29:32],
|
||||||
|
176: _ALUOp_name[32:35],
|
||||||
|
192: _ALUOp_name[35:39],
|
||||||
|
208: _ALUOp_name[39:43],
|
||||||
|
255: _ALUOp_name[43:55],
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i ALUOp) String() string {
|
||||||
|
if str, ok := _ALUOp_map[i]; ok {
|
||||||
|
return str
|
||||||
|
}
|
||||||
|
return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
|
@ -0,0 +1,2 @@
// Package asm is an assembler for eBPF bytecode.
package asm
@ -0,0 +1,143 @@
|
||||||
|
package asm
|
||||||
|
|
||||||
|
//go:generate stringer -output func_string.go -type=BuiltinFunc
|
||||||
|
|
||||||
|
// BuiltinFunc is a built-in eBPF function.
|
||||||
|
type BuiltinFunc int32
|
||||||
|
|
||||||
|
// eBPF built-in functions
|
||||||
|
//
|
||||||
|
// You can regenerate this list using the following gawk script:
|
||||||
|
//
|
||||||
|
// /FN\(.+\),/ {
|
||||||
|
// match($1, /\((.+)\)/, r)
|
||||||
|
// split(r[1], p, "_")
|
||||||
|
// printf "Fn"
|
||||||
|
// for (i in p) {
|
||||||
|
// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
|
||||||
|
// }
|
||||||
|
// print ""
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The script expects include/uapi/linux/bpf.h as its input.
|
||||||
|
const (
|
||||||
|
FnUnspec BuiltinFunc = iota
|
||||||
|
FnMapLookupElem
|
||||||
|
FnMapUpdateElem
|
||||||
|
FnMapDeleteElem
|
||||||
|
FnProbeRead
|
||||||
|
FnKtimeGetNs
|
||||||
|
FnTracePrintk
|
||||||
|
FnGetPrandomU32
|
||||||
|
FnGetSmpProcessorId
|
||||||
|
FnSkbStoreBytes
|
||||||
|
FnL3CsumReplace
|
||||||
|
FnL4CsumReplace
|
||||||
|
FnTailCall
|
||||||
|
FnCloneRedirect
|
||||||
|
FnGetCurrentPidTgid
|
||||||
|
FnGetCurrentUidGid
|
||||||
|
FnGetCurrentComm
|
||||||
|
FnGetCgroupClassid
|
||||||
|
FnSkbVlanPush
|
||||||
|
FnSkbVlanPop
|
||||||
|
FnSkbGetTunnelKey
|
||||||
|
FnSkbSetTunnelKey
|
||||||
|
FnPerfEventRead
|
||||||
|
FnRedirect
|
||||||
|
FnGetRouteRealm
|
||||||
|
FnPerfEventOutput
|
||||||
|
FnSkbLoadBytes
|
||||||
|
FnGetStackid
|
||||||
|
FnCsumDiff
|
||||||
|
FnSkbGetTunnelOpt
|
||||||
|
FnSkbSetTunnelOpt
|
||||||
|
FnSkbChangeProto
|
||||||
|
FnSkbChangeType
|
||||||
|
FnSkbUnderCgroup
|
||||||
|
FnGetHashRecalc
|
||||||
|
FnGetCurrentTask
|
||||||
|
FnProbeWriteUser
|
||||||
|
FnCurrentTaskUnderCgroup
|
||||||
|
FnSkbChangeTail
|
||||||
|
FnSkbPullData
|
||||||
|
FnCsumUpdate
|
||||||
|
FnSetHashInvalid
|
||||||
|
FnGetNumaNodeId
|
||||||
|
FnSkbChangeHead
|
||||||
|
FnXdpAdjustHead
|
||||||
|
FnProbeReadStr
|
||||||
|
FnGetSocketCookie
|
||||||
|
FnGetSocketUid
|
||||||
|
FnSetHash
|
||||||
|
FnSetsockopt
|
||||||
|
FnSkbAdjustRoom
|
||||||
|
FnRedirectMap
|
||||||
|
FnSkRedirectMap
|
||||||
|
FnSockMapUpdate
|
||||||
|
FnXdpAdjustMeta
|
||||||
|
FnPerfEventReadValue
|
||||||
|
FnPerfProgReadValue
|
||||||
|
FnGetsockopt
|
||||||
|
FnOverrideReturn
|
||||||
|
FnSockOpsCbFlagsSet
|
||||||
|
FnMsgRedirectMap
|
||||||
|
FnMsgApplyBytes
|
||||||
|
FnMsgCorkBytes
|
||||||
|
FnMsgPullData
|
||||||
|
FnBind
|
||||||
|
FnXdpAdjustTail
|
||||||
|
FnSkbGetXfrmState
|
||||||
|
FnGetStack
|
||||||
|
FnSkbLoadBytesRelative
|
||||||
|
FnFibLookup
|
||||||
|
FnSockHashUpdate
|
||||||
|
FnMsgRedirectHash
|
||||||
|
FnSkRedirectHash
|
||||||
|
FnLwtPushEncap
|
||||||
|
FnLwtSeg6StoreBytes
|
||||||
|
FnLwtSeg6AdjustSrh
|
||||||
|
FnLwtSeg6Action
|
||||||
|
FnRcRepeat
|
||||||
|
FnRcKeydown
|
||||||
|
FnSkbCgroupId
|
||||||
|
FnGetCurrentCgroupId
|
||||||
|
FnGetLocalStorage
|
||||||
|
FnSkSelectReuseport
|
||||||
|
FnSkbAncestorCgroupId
|
||||||
|
FnSkLookupTcp
|
||||||
|
FnSkLookupUdp
|
||||||
|
FnSkRelease
|
||||||
|
FnMapPushElem
|
||||||
|
FnMapPopElem
|
||||||
|
FnMapPeekElem
|
||||||
|
FnMsgPushData
|
||||||
|
FnMsgPopData
|
||||||
|
FnRcPointerRel
|
||||||
|
FnSpinLock
|
||||||
|
FnSpinUnlock
|
||||||
|
FnSkFullsock
|
||||||
|
FnTcpSock
|
||||||
|
FnSkbEcnSetCe
|
||||||
|
FnGetListenerSock
|
||||||
|
FnSkcLookupTcp
|
||||||
|
FnTcpCheckSyncookie
|
||||||
|
FnSysctlGetName
|
||||||
|
FnSysctlGetCurrentValue
|
||||||
|
FnSysctlGetNewValue
|
||||||
|
FnSysctlSetNewValue
|
||||||
|
FnStrtol
|
||||||
|
FnStrtoul
|
||||||
|
FnSkStorageGet
|
||||||
|
FnSkStorageDelete
|
||||||
|
FnSendSignal
|
||||||
|
FnTcpGenSyncookie
|
||||||
|
)
|
||||||
|
|
||||||
|
// Call emits a function call.
|
||||||
|
func (fn BuiltinFunc) Call() Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: OpCode(JumpClass).SetJumpOp(Call),
|
||||||
|
Constant: int64(fn),
|
||||||
|
}
|
||||||
|
}
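A sketch of how `Call` is typically used: pick a helper constant and emit the call; per the BPF calling convention the helper's result lands in R0 (the register constants are defined elsewhere in the package).

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Call bpf_get_prandom_u32 and return its result; the helper's return
	// value is placed in R0 by the BPF calling convention.
	insns := asm.Instructions{
		asm.FnGetPrandomU32.Call(),
		asm.Return(),
	}
	fmt.Println(insns)
}
```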
|
|
@ -0,0 +1,133 @@
|
||||||
|
// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package asm
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[FnUnspec-0]
|
||||||
|
_ = x[FnMapLookupElem-1]
|
||||||
|
_ = x[FnMapUpdateElem-2]
|
||||||
|
_ = x[FnMapDeleteElem-3]
|
||||||
|
_ = x[FnProbeRead-4]
|
||||||
|
_ = x[FnKtimeGetNs-5]
|
||||||
|
_ = x[FnTracePrintk-6]
|
||||||
|
_ = x[FnGetPrandomU32-7]
|
||||||
|
_ = x[FnGetSmpProcessorId-8]
|
||||||
|
_ = x[FnSkbStoreBytes-9]
|
||||||
|
_ = x[FnL3CsumReplace-10]
|
||||||
|
_ = x[FnL4CsumReplace-11]
|
||||||
|
_ = x[FnTailCall-12]
|
||||||
|
_ = x[FnCloneRedirect-13]
|
||||||
|
_ = x[FnGetCurrentPidTgid-14]
|
||||||
|
_ = x[FnGetCurrentUidGid-15]
|
||||||
|
_ = x[FnGetCurrentComm-16]
|
||||||
|
_ = x[FnGetCgroupClassid-17]
|
||||||
|
_ = x[FnSkbVlanPush-18]
|
||||||
|
_ = x[FnSkbVlanPop-19]
|
||||||
|
_ = x[FnSkbGetTunnelKey-20]
|
||||||
|
_ = x[FnSkbSetTunnelKey-21]
|
||||||
|
_ = x[FnPerfEventRead-22]
|
||||||
|
_ = x[FnRedirect-23]
|
||||||
|
_ = x[FnGetRouteRealm-24]
|
||||||
|
_ = x[FnPerfEventOutput-25]
|
||||||
|
_ = x[FnSkbLoadBytes-26]
|
||||||
|
_ = x[FnGetStackid-27]
|
||||||
|
_ = x[FnCsumDiff-28]
|
||||||
|
_ = x[FnSkbGetTunnelOpt-29]
|
||||||
|
_ = x[FnSkbSetTunnelOpt-30]
|
||||||
|
_ = x[FnSkbChangeProto-31]
|
||||||
|
_ = x[FnSkbChangeType-32]
|
||||||
|
_ = x[FnSkbUnderCgroup-33]
|
||||||
|
_ = x[FnGetHashRecalc-34]
|
||||||
|
_ = x[FnGetCurrentTask-35]
|
||||||
|
_ = x[FnProbeWriteUser-36]
|
||||||
|
_ = x[FnCurrentTaskUnderCgroup-37]
|
||||||
|
_ = x[FnSkbChangeTail-38]
|
||||||
|
_ = x[FnSkbPullData-39]
|
||||||
|
_ = x[FnCsumUpdate-40]
|
||||||
|
_ = x[FnSetHashInvalid-41]
|
||||||
|
_ = x[FnGetNumaNodeId-42]
|
||||||
|
_ = x[FnSkbChangeHead-43]
|
||||||
|
_ = x[FnXdpAdjustHead-44]
|
||||||
|
_ = x[FnProbeReadStr-45]
|
||||||
|
_ = x[FnGetSocketCookie-46]
|
||||||
|
_ = x[FnGetSocketUid-47]
|
||||||
|
_ = x[FnSetHash-48]
|
||||||
|
_ = x[FnSetsockopt-49]
|
||||||
|
_ = x[FnSkbAdjustRoom-50]
|
||||||
|
_ = x[FnRedirectMap-51]
|
||||||
|
_ = x[FnSkRedirectMap-52]
|
||||||
|
_ = x[FnSockMapUpdate-53]
|
||||||
|
_ = x[FnXdpAdjustMeta-54]
|
||||||
|
_ = x[FnPerfEventReadValue-55]
|
||||||
|
_ = x[FnPerfProgReadValue-56]
|
||||||
|
_ = x[FnGetsockopt-57]
|
||||||
|
_ = x[FnOverrideReturn-58]
|
||||||
|
_ = x[FnSockOpsCbFlagsSet-59]
|
||||||
|
_ = x[FnMsgRedirectMap-60]
|
||||||
|
_ = x[FnMsgApplyBytes-61]
|
||||||
|
_ = x[FnMsgCorkBytes-62]
|
||||||
|
_ = x[FnMsgPullData-63]
|
||||||
|
_ = x[FnBind-64]
|
||||||
|
_ = x[FnXdpAdjustTail-65]
|
||||||
|
_ = x[FnSkbGetXfrmState-66]
|
||||||
|
_ = x[FnGetStack-67]
|
||||||
|
_ = x[FnSkbLoadBytesRelative-68]
|
||||||
|
_ = x[FnFibLookup-69]
|
||||||
|
_ = x[FnSockHashUpdate-70]
|
||||||
|
_ = x[FnMsgRedirectHash-71]
|
||||||
|
_ = x[FnSkRedirectHash-72]
|
||||||
|
_ = x[FnLwtPushEncap-73]
|
||||||
|
_ = x[FnLwtSeg6StoreBytes-74]
|
||||||
|
_ = x[FnLwtSeg6AdjustSrh-75]
|
||||||
|
_ = x[FnLwtSeg6Action-76]
|
||||||
|
_ = x[FnRcRepeat-77]
|
||||||
|
_ = x[FnRcKeydown-78]
|
||||||
|
_ = x[FnSkbCgroupId-79]
|
||||||
|
_ = x[FnGetCurrentCgroupId-80]
|
||||||
|
_ = x[FnGetLocalStorage-81]
|
||||||
|
_ = x[FnSkSelectReuseport-82]
|
||||||
|
_ = x[FnSkbAncestorCgroupId-83]
|
||||||
|
_ = x[FnSkLookupTcp-84]
|
||||||
|
_ = x[FnSkLookupUdp-85]
|
||||||
|
_ = x[FnSkRelease-86]
|
||||||
|
_ = x[FnMapPushElem-87]
|
||||||
|
_ = x[FnMapPopElem-88]
|
||||||
|
_ = x[FnMapPeekElem-89]
|
||||||
|
_ = x[FnMsgPushData-90]
|
||||||
|
_ = x[FnMsgPopData-91]
|
||||||
|
_ = x[FnRcPointerRel-92]
|
||||||
|
_ = x[FnSpinLock-93]
|
||||||
|
_ = x[FnSpinUnlock-94]
|
||||||
|
_ = x[FnSkFullsock-95]
|
||||||
|
_ = x[FnTcpSock-96]
|
||||||
|
_ = x[FnSkbEcnSetCe-97]
|
||||||
|
_ = x[FnGetListenerSock-98]
|
||||||
|
_ = x[FnSkcLookupTcp-99]
|
||||||
|
_ = x[FnTcpCheckSyncookie-100]
|
||||||
|
_ = x[FnSysctlGetName-101]
|
||||||
|
_ = x[FnSysctlGetCurrentValue-102]
|
||||||
|
_ = x[FnSysctlGetNewValue-103]
|
||||||
|
_ = x[FnSysctlSetNewValue-104]
|
||||||
|
_ = x[FnStrtol-105]
|
||||||
|
_ = x[FnStrtoul-106]
|
||||||
|
_ = x[FnSkStorageGet-107]
|
||||||
|
_ = x[FnSkStorageDelete-108]
|
||||||
|
_ = x[FnSendSignal-109]
|
||||||
|
_ = x[FnTcpGenSyncookie-110]
|
||||||
|
}
|
||||||
|
|
||||||
|
const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookie"
|
||||||
|
|
||||||
|
var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632}
|
||||||
|
|
||||||
|
func (i BuiltinFunc) String() string {
|
||||||
|
if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
|
||||||
|
return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]]
|
||||||
|
}
|
|
@ -0,0 +1,498 @@
|
||||||
|
package asm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InstructionSize is the size of a BPF instruction in bytes
|
||||||
|
const InstructionSize = 8
|
||||||
|
|
||||||
|
// RawInstructionOffset is an offset in units of raw BPF instructions.
|
||||||
|
type RawInstructionOffset uint64
|
||||||
|
|
||||||
|
// Bytes returns the offset of an instruction in bytes.
|
||||||
|
func (rio RawInstructionOffset) Bytes() uint64 {
|
||||||
|
return uint64(rio) * InstructionSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instruction is a single eBPF instruction.
|
||||||
|
type Instruction struct {
|
||||||
|
OpCode OpCode
|
||||||
|
Dst Register
|
||||||
|
Src Register
|
||||||
|
Offset int16
|
||||||
|
Constant int64
|
||||||
|
Reference string
|
||||||
|
Symbol string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sym creates a symbol.
|
||||||
|
func (ins Instruction) Sym(name string) Instruction {
|
||||||
|
ins.Symbol = name
|
||||||
|
return ins
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal decodes a BPF instruction.
|
||||||
|
func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) {
|
||||||
|
var bi bpfInstruction
|
||||||
|
err := binary.Read(r, bo, &bi)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.OpCode = bi.OpCode
|
||||||
|
ins.Offset = bi.Offset
|
||||||
|
ins.Constant = int64(bi.Constant)
|
||||||
|
ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("can't unmarshal registers: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bi.OpCode.isDWordLoad() {
|
||||||
|
return InstructionSize, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var bi2 bpfInstruction
|
||||||
|
if err := binary.Read(r, bo, &bi2); err != nil {
|
||||||
|
// No Wrap, to avoid io.EOF clash
|
||||||
|
return 0, errors.New("64bit immediate is missing second half")
|
||||||
|
}
|
||||||
|
if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 {
|
||||||
|
return 0, errors.New("64bit immediate has non-zero fields")
|
||||||
|
}
|
||||||
|
ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant)))
|
||||||
|
|
||||||
|
return 2 * InstructionSize, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal encodes a BPF instruction.
|
||||||
|
func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
|
||||||
|
if ins.OpCode == InvalidOpCode {
|
||||||
|
return 0, errors.New("invalid opcode")
|
||||||
|
}
|
||||||
|
|
||||||
|
isDWordLoad := ins.OpCode.isDWordLoad()
|
||||||
|
|
||||||
|
cons := int32(ins.Constant)
|
||||||
|
if isDWordLoad {
|
||||||
|
// Encode least significant 32bit first for 64bit operations.
|
||||||
|
cons = int32(uint32(ins.Constant))
|
||||||
|
}
|
||||||
|
|
||||||
|
regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("can't marshal registers: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bpfi := bpfInstruction{
|
||||||
|
ins.OpCode,
|
||||||
|
regs,
|
||||||
|
ins.Offset,
|
||||||
|
cons,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := binary.Write(w, bo, &bpfi); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isDWordLoad {
|
||||||
|
return InstructionSize, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
bpfi = bpfInstruction{
|
||||||
|
Constant: int32(ins.Constant >> 32),
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := binary.Write(w, bo, &bpfi); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return 2 * InstructionSize, nil
|
||||||
|
}
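To make the wire format concrete, here is a small sketch (error handling elided): a normal instruction marshals to 8 bytes, while a 64-bit immediate load is split across two raw instructions and marshals to 16.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	var buf bytes.Buffer

	// A regular instruction encodes to InstructionSize (8) bytes.
	n, _ := asm.Mov.Imm(asm.R0, 1).Marshal(&buf, binary.LittleEndian)
	fmt.Println(n, buf.Len()) // 8 8

	// A 64-bit immediate load is double-wide: two raw instructions, 16 bytes.
	buf.Reset()
	n, _ = asm.LoadImm(asm.R1, 1, asm.DWord).Marshal(&buf, binary.LittleEndian)
	fmt.Println(n, buf.Len()) // 16 16
}
```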
|
||||||
|
|
||||||
|
// RewriteMapPtr changes an instruction to use a new map fd.
|
||||||
|
//
|
||||||
|
// Returns an error if the instruction doesn't load a map.
|
||||||
|
func (ins *Instruction) RewriteMapPtr(fd int) error {
|
||||||
|
if !ins.OpCode.isDWordLoad() {
|
||||||
|
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
|
||||||
|
return errors.New("not a load from a map")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Preserve the offset value for direct map loads.
|
||||||
|
offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
|
||||||
|
rawFd := uint64(uint32(fd))
|
||||||
|
ins.Constant = int64(offset | rawFd)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ins *Instruction) mapPtr() uint32 {
|
||||||
|
return uint32(uint64(ins.Constant) & math.MaxUint32)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RewriteMapOffset changes the offset of a direct load from a map.
|
||||||
|
//
|
||||||
|
// Returns an error if the instruction is not a direct load.
|
||||||
|
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
|
||||||
|
if !ins.OpCode.isDWordLoad() {
|
||||||
|
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ins.Src != PseudoMapValue {
|
||||||
|
return errors.New("not a direct load from a map")
|
||||||
|
}
|
||||||
|
|
||||||
|
fd := uint64(ins.Constant) & math.MaxUint32
|
||||||
|
ins.Constant = int64(uint64(offset)<<32 | fd)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ins *Instruction) mapOffset() uint32 {
|
||||||
|
return uint32(uint64(ins.Constant) >> 32)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isLoadFromMap returns true if the instruction loads from a map.
|
||||||
|
//
|
||||||
|
// This covers both loading the map pointer and direct map value loads.
|
||||||
|
func (ins *Instruction) isLoadFromMap() bool {
|
||||||
|
return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsFunctionCall returns true if the instruction calls another BPF function.
|
||||||
|
//
|
||||||
|
// This is not the same thing as a BPF helper call.
|
||||||
|
func (ins *Instruction) IsFunctionCall() bool {
|
||||||
|
return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format implements fmt.Formatter.
|
||||||
|
func (ins Instruction) Format(f fmt.State, c rune) {
|
||||||
|
if c != 'v' {
|
||||||
|
fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
op := ins.OpCode
|
||||||
|
|
||||||
|
if op == InvalidOpCode {
|
||||||
|
fmt.Fprint(f, "INVALID")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Omit trailing space for Exit
|
||||||
|
if op.JumpOp() == Exit {
|
||||||
|
fmt.Fprint(f, op)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ins.isLoadFromMap() {
|
||||||
|
fd := int32(ins.mapPtr())
|
||||||
|
switch ins.Src {
|
||||||
|
case PseudoMapFD:
|
||||||
|
fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
|
||||||
|
|
||||||
|
case PseudoMapValue:
|
||||||
|
fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
|
||||||
|
}
|
||||||
|
|
||||||
|
goto ref
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(f, "%v ", op)
|
||||||
|
switch cls := op.Class(); cls {
|
||||||
|
case LdClass, LdXClass, StClass, StXClass:
|
||||||
|
switch op.Mode() {
|
||||||
|
case ImmMode:
|
||||||
|
fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant)
|
||||||
|
case AbsMode:
|
||||||
|
fmt.Fprintf(f, "imm: %d", ins.Constant)
|
||||||
|
case IndMode:
|
||||||
|
fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant)
|
||||||
|
case MemMode:
|
||||||
|
fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant)
|
||||||
|
case XAddMode:
|
||||||
|
fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src)
|
||||||
|
}
|
||||||
|
|
||||||
|
case ALU64Class, ALUClass:
|
||||||
|
fmt.Fprintf(f, "dst: %s ", ins.Dst)
|
||||||
|
if op.ALUOp() == Swap || op.Source() == ImmSource {
|
||||||
|
fmt.Fprintf(f, "imm: %d", ins.Constant)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(f, "src: %s", ins.Src)
|
||||||
|
}
|
||||||
|
|
||||||
|
case JumpClass:
|
||||||
|
switch jop := op.JumpOp(); jop {
|
||||||
|
case Call:
|
||||||
|
if ins.Src == PseudoCall {
|
||||||
|
// bpf-to-bpf call
|
||||||
|
fmt.Fprint(f, ins.Constant)
|
||||||
|
} else {
|
||||||
|
fmt.Fprint(f, BuiltinFunc(ins.Constant))
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset)
|
||||||
|
if op.Source() == ImmSource {
|
||||||
|
fmt.Fprintf(f, "imm: %d", ins.Constant)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(f, "src: %s", ins.Src)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ref:
|
||||||
|
if ins.Reference != "" {
|
||||||
|
fmt.Fprintf(f, " <%s>", ins.Reference)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instructions is an eBPF program.
|
||||||
|
type Instructions []Instruction
|
||||||
|
|
||||||
|
func (insns Instructions) String() string {
|
||||||
|
return fmt.Sprint(insns)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
|
||||||
|
//
|
||||||
|
// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
|
||||||
|
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
|
||||||
|
if symbol == "" {
|
||||||
|
return errors.New("empty symbol")
|
||||||
|
}
|
||||||
|
|
||||||
|
found := false
|
||||||
|
for i := range insns {
|
||||||
|
ins := &insns[i]
|
||||||
|
if ins.Reference != symbol {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ins.RewriteMapPtr(fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return &unreferencedSymbolError{symbol}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
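A sketch of the symbol-based rewrite: emit a map-pointer load with a placeholder fd, tag it via the Reference field, and patch it once the real fd is known (the fd value 42 below is purely illustrative).

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Placeholder load of a map pointer, tagged with a symbolic reference.
	load := asm.LoadMapPtr(asm.R1, 0)
	load.Reference = "my_map"

	insns := asm.Instructions{load, asm.Return()}

	// Patch every load that references "my_map" with the real fd.
	// IsUnreferencedSymbol distinguishes "symbol not used" from other failures.
	if err := insns.RewriteMapPtr("my_map", 42); err != nil && !asm.IsUnreferencedSymbol(err) {
		panic(err)
	}
	fmt.Println(insns)
}
```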
|
||||||
|
|
||||||
|
// SymbolOffsets returns the set of symbols and their offset in
|
||||||
|
// the instructions.
|
||||||
|
func (insns Instructions) SymbolOffsets() (map[string]int, error) {
|
||||||
|
offsets := make(map[string]int)
|
||||||
|
|
||||||
|
for i, ins := range insns {
|
||||||
|
if ins.Symbol == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := offsets[ins.Symbol]; ok {
|
||||||
|
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
|
||||||
|
}
|
||||||
|
|
||||||
|
offsets[ins.Symbol] = i
|
||||||
|
}
|
||||||
|
|
||||||
|
return offsets, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReferenceOffsets returns the set of references and their offset in
|
||||||
|
// the instructions.
|
||||||
|
func (insns Instructions) ReferenceOffsets() map[string][]int {
|
||||||
|
offsets := make(map[string][]int)
|
||||||
|
|
||||||
|
for i, ins := range insns {
|
||||||
|
if ins.Reference == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
offsets[ins.Reference] = append(offsets[ins.Reference], i)
|
||||||
|
}
|
||||||
|
|
||||||
|
return offsets
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format implements fmt.Formatter.
|
||||||
|
//
|
||||||
|
// You can control indentation of symbols by
|
||||||
|
// specifying a width. Setting a precision controls the indentation of
|
||||||
|
// instructions.
|
||||||
|
// The default character is a tab, which can be overridden by specifying
|
||||||
|
// the ' ' space flag.
|
||||||
|
func (insns Instructions) Format(f fmt.State, c rune) {
|
||||||
|
if c != 's' && c != 'v' {
|
||||||
|
fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Precision is better in this case, because it allows
|
||||||
|
// specifying 0 padding easily.
|
||||||
|
padding, ok := f.Precision()
|
||||||
|
if !ok {
|
||||||
|
padding = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
indent := strings.Repeat("\t", padding)
|
||||||
|
if f.Flag(' ') {
|
||||||
|
indent = strings.Repeat(" ", padding)
|
||||||
|
}
|
||||||
|
|
||||||
|
symPadding, ok := f.Width()
|
||||||
|
if !ok {
|
||||||
|
symPadding = padding - 1
|
||||||
|
}
|
||||||
|
if symPadding < 0 {
|
||||||
|
symPadding = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
symIndent := strings.Repeat("\t", symPadding)
|
||||||
|
if f.Flag(' ') {
|
||||||
|
symIndent = strings.Repeat(" ", symPadding)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Guess how many digits we need at most, by assuming that all instructions
|
||||||
|
// are double wide.
|
||||||
|
highestOffset := len(insns) * 2
|
||||||
|
offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))
|
||||||
|
|
||||||
|
iter := insns.Iterate()
|
||||||
|
for iter.Next() {
|
||||||
|
if iter.Ins.Symbol != "" {
|
||||||
|
fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
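A short sketch of the width/precision/space-flag behaviour described above: the width indents symbols, the precision indents instructions, and the space flag switches the padding character from tabs to spaces.

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 0).Sym("start"),
		asm.Return(),
	}

	fmt.Printf("%v\n", insns)     // default: instructions indented by one tab
	fmt.Printf("% 4.8v\n", insns) // space flag: symbols indented 4 spaces, instructions 8
}
```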
|
||||||
|
|
||||||
|
// Marshal encodes a BPF program into the kernel format.
|
||||||
|
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
|
||||||
|
for i, ins := range insns {
|
||||||
|
_, err := ins.Marshal(w, bo)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("instruction %d: %w", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tag calculates the kernel tag for a series of instructions.
|
||||||
|
//
|
||||||
|
// It mirrors bpf_prog_calc_tag in the kernel and so can be compared
|
||||||
|
// to ProgramInfo.Tag to figure out whether a loaded program matches
|
||||||
|
// certain instructions.
|
||||||
|
func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
|
||||||
|
h := sha1.New()
|
||||||
|
for i, ins := range insns {
|
||||||
|
if ins.isLoadFromMap() {
|
||||||
|
ins.Constant = 0
|
||||||
|
}
|
||||||
|
_, err := ins.Marshal(h, bo)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("instruction %d: %w", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
|
||||||
|
}
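A sketch of computing the tag for a trivial program; the resulting hex string can be compared with the tag the kernel reports for an already-loaded program.

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 0),
		asm.Return(),
	}

	// Mirrors bpf_prog_calc_tag: map loads are hashed with a zeroed constant.
	tag, err := insns.Tag(binary.LittleEndian)
	if err != nil {
		panic(err)
	}
	fmt.Println(tag)
}
```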
|
||||||
|
|
||||||
|
// Iterate allows iterating a BPF program while keeping track of
|
||||||
|
// various offsets.
|
||||||
|
//
|
||||||
|
// Modifying the instruction slice will lead to undefined behaviour.
|
||||||
|
func (insns Instructions) Iterate() *InstructionIterator {
|
||||||
|
return &InstructionIterator{insns: insns}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstructionIterator iterates over a BPF program.
|
||||||
|
type InstructionIterator struct {
|
||||||
|
insns Instructions
|
||||||
|
// The instruction in question.
|
||||||
|
Ins *Instruction
|
||||||
|
// The index of the instruction in the original instruction slice.
|
||||||
|
Index int
|
||||||
|
// The offset of the instruction in raw BPF instructions. This accounts
|
||||||
|
// for double-wide instructions.
|
||||||
|
Offset RawInstructionOffset
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns true as long as there are any instructions remaining.
|
||||||
|
func (iter *InstructionIterator) Next() bool {
|
||||||
|
if len(iter.insns) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if iter.Ins != nil {
|
||||||
|
iter.Index++
|
||||||
|
iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions())
|
||||||
|
}
|
||||||
|
iter.Ins = &iter.insns[0]
|
||||||
|
iter.insns = iter.insns[1:]
|
||||||
|
return true
|
||||||
|
}
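The distinction between Index and Offset matters for double-wide instructions; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadImm(asm.R0, 1, asm.DWord), // double-wide: two raw instructions
		asm.Mov.Imm(asm.R1, 2),
		asm.Return(),
	}

	// Index advances by one per Instruction, Offset by two after the DWord load.
	for iter := insns.Iterate(); iter.Next(); {
		fmt.Printf("index=%d offset=%d %v\n", iter.Index, iter.Offset, iter.Ins)
	}
}
```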
|
||||||
|
|
||||||
|
type bpfInstruction struct {
|
||||||
|
OpCode OpCode
|
||||||
|
Registers bpfRegisters
|
||||||
|
Offset int16
|
||||||
|
Constant int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type bpfRegisters uint8
|
||||||
|
|
||||||
|
func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
|
||||||
|
switch bo {
|
||||||
|
case binary.LittleEndian:
|
||||||
|
return bpfRegisters((src << 4) | (dst & 0xF)), nil
|
||||||
|
case binary.BigEndian:
|
||||||
|
return bpfRegisters((dst << 4) | (src & 0xF)), nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) {
|
||||||
|
switch bo {
|
||||||
|
case binary.LittleEndian:
|
||||||
|
return Register(r & 0xF), Register(r >> 4), nil
|
||||||
|
case binary.BigEndian:
|
||||||
|
return Register(r >> 4), Register(r & 0xf), nil
|
||||||
|
default:
|
||||||
|
return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type unreferencedSymbolError struct {
|
||||||
|
symbol string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (use *unreferencedSymbolError) Error() string {
|
||||||
|
return fmt.Sprintf("unreferenced symbol %s", use.symbol)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUnreferencedSymbol returns true if err was caused by
|
||||||
|
// an unreferenced symbol.
|
||||||
|
func IsUnreferencedSymbol(err error) bool {
|
||||||
|
_, ok := err.(*unreferencedSymbolError)
|
||||||
|
return ok
|
||||||
|
}
|
|
@ -0,0 +1,109 @@
|
||||||
|
package asm
|
||||||
|
|
||||||
|
//go:generate stringer -output jump_string.go -type=JumpOp
|
||||||
|
|
||||||
|
// JumpOp affects control flow.
|
||||||
|
//
|
||||||
|
// msb lsb
|
||||||
|
// +----+-+---+
|
||||||
|
// |OP |s|cls|
|
||||||
|
// +----+-+---+
|
||||||
|
type JumpOp uint8
|
||||||
|
|
||||||
|
const jumpMask OpCode = aluMask
|
||||||
|
|
||||||
|
const (
|
||||||
|
// InvalidJumpOp is returned by getters when invoked
|
||||||
|
// on non branch OpCodes
|
||||||
|
InvalidJumpOp JumpOp = 0xff
|
||||||
|
// Ja jumps by offset unconditionally
|
||||||
|
Ja JumpOp = 0x00
|
||||||
|
// JEq jumps by offset if r == imm
|
||||||
|
JEq JumpOp = 0x10
|
||||||
|
// JGT jumps by offset if r > imm
|
||||||
|
JGT JumpOp = 0x20
|
||||||
|
// JGE jumps by offset if r >= imm
|
||||||
|
JGE JumpOp = 0x30
|
||||||
|
// JSet jumps by offset if r & imm
|
||||||
|
JSet JumpOp = 0x40
|
||||||
|
// JNE jumps by offset if r != imm
|
||||||
|
JNE JumpOp = 0x50
|
||||||
|
// JSGT jumps by offset if signed r > signed imm
|
||||||
|
JSGT JumpOp = 0x60
|
||||||
|
// JSGE jumps by offset if signed r >= signed imm
|
||||||
|
JSGE JumpOp = 0x70
|
||||||
|
// Call builtin or user defined function from imm
|
||||||
|
Call JumpOp = 0x80
|
||||||
|
// Exit ends execution, with value in r0
|
||||||
|
Exit JumpOp = 0x90
|
||||||
|
// JLT jumps by offset if r < imm
|
||||||
|
JLT JumpOp = 0xa0
|
||||||
|
// JLE jumps by offset if r <= imm
|
||||||
|
JLE JumpOp = 0xb0
|
||||||
|
// JSLT jumps by offset if signed r < signed imm
|
||||||
|
JSLT JumpOp = 0xc0
|
||||||
|
// JSLE jumps by offset if signed r <= signed imm
|
||||||
|
JSLE JumpOp = 0xd0
|
||||||
|
)
|
||||||
|
|
||||||
|
// Return emits an exit instruction.
|
||||||
|
//
|
||||||
|
// Requires a return value in R0.
|
||||||
|
func Return() Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: OpCode(JumpClass).SetJumpOp(Exit),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Op returns the OpCode for a given jump source.
|
||||||
|
func (op JumpOp) Op(source Source) OpCode {
|
||||||
|
return OpCode(JumpClass).SetJumpOp(op).SetSource(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled.
|
||||||
|
func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
|
||||||
|
if op == Exit || op == Call || op == Ja {
|
||||||
|
return Instruction{OpCode: InvalidOpCode}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Instruction{
|
||||||
|
OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource),
|
||||||
|
Dst: dst,
|
||||||
|
Offset: -1,
|
||||||
|
Constant: int64(value),
|
||||||
|
Reference: label,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled.
|
||||||
|
func (op JumpOp) Reg(dst, src Register, label string) Instruction {
|
||||||
|
if op == Exit || op == Call || op == Ja {
|
||||||
|
return Instruction{OpCode: InvalidOpCode}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Instruction{
|
||||||
|
OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource),
|
||||||
|
Dst: dst,
|
||||||
|
Src: src,
|
||||||
|
Offset: -1,
|
||||||
|
Reference: label,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Label adjusts PC to the address of the label.
|
||||||
|
func (op JumpOp) Label(label string) Instruction {
|
||||||
|
if op == Call {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: OpCode(JumpClass).SetJumpOp(Call),
|
||||||
|
Src: PseudoCall,
|
||||||
|
Constant: -1,
|
||||||
|
Reference: label,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Instruction{
|
||||||
|
OpCode: OpCode(JumpClass).SetJumpOp(op),
|
||||||
|
Offset: -1,
|
||||||
|
Reference: label,
|
||||||
|
}
|
||||||
|
}
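A sketch of branching to a labelled instruction: the Offset is left at -1 and the Reference carries the label, which is assumed to be resolved later when the program is assembled for loading.

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.JEq.Imm(asm.R1, 0, "exit"), // if r1 == 0, jump to the "exit" symbol
		asm.Mov.Imm(asm.R0, 1),
		asm.Return().Sym("exit"),
	}
	fmt.Println(insns)
}
```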
|
|
@ -0,0 +1,53 @@
|
||||||
|
// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package asm
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[InvalidJumpOp-255]
|
||||||
|
_ = x[Ja-0]
|
||||||
|
_ = x[JEq-16]
|
||||||
|
_ = x[JGT-32]
|
||||||
|
_ = x[JGE-48]
|
||||||
|
_ = x[JSet-64]
|
||||||
|
_ = x[JNE-80]
|
||||||
|
_ = x[JSGT-96]
|
||||||
|
_ = x[JSGE-112]
|
||||||
|
_ = x[Call-128]
|
||||||
|
_ = x[Exit-144]
|
||||||
|
_ = x[JLT-160]
|
||||||
|
_ = x[JLE-176]
|
||||||
|
_ = x[JSLT-192]
|
||||||
|
_ = x[JSLE-208]
|
||||||
|
}
|
||||||
|
|
||||||
|
const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp"
|
||||||
|
|
||||||
|
var _JumpOp_map = map[JumpOp]string{
|
||||||
|
0: _JumpOp_name[0:2],
|
||||||
|
16: _JumpOp_name[2:5],
|
||||||
|
32: _JumpOp_name[5:8],
|
||||||
|
48: _JumpOp_name[8:11],
|
||||||
|
64: _JumpOp_name[11:15],
|
||||||
|
80: _JumpOp_name[15:18],
|
||||||
|
96: _JumpOp_name[18:22],
|
||||||
|
112: _JumpOp_name[22:26],
|
||||||
|
128: _JumpOp_name[26:30],
|
||||||
|
144: _JumpOp_name[30:34],
|
||||||
|
160: _JumpOp_name[34:37],
|
||||||
|
176: _JumpOp_name[37:40],
|
||||||
|
192: _JumpOp_name[40:44],
|
||||||
|
208: _JumpOp_name[44:48],
|
||||||
|
255: _JumpOp_name[48:61],
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i JumpOp) String() string {
|
||||||
|
if str, ok := _JumpOp_map[i]; ok {
|
||||||
|
return str
|
||||||
|
}
|
||||||
|
return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
|
@ -0,0 +1,204 @@
|
||||||
|
package asm
|
||||||
|
|
||||||
|
//go:generate stringer -output load_store_string.go -type=Mode,Size
|
||||||
|
|
||||||
|
// Mode for load and store operations
|
||||||
|
//
|
||||||
|
// msb lsb
|
||||||
|
// +---+--+---+
|
||||||
|
// |MDE|sz|cls|
|
||||||
|
// +---+--+---+
|
||||||
|
type Mode uint8
|
||||||
|
|
||||||
|
const modeMask OpCode = 0xe0
|
||||||
|
|
||||||
|
const (
|
||||||
|
// InvalidMode is returned by getters when invoked
|
||||||
|
// on non load / store OpCodes
|
||||||
|
InvalidMode Mode = 0xff
|
||||||
|
// ImmMode - immediate value
|
||||||
|
ImmMode Mode = 0x00
|
||||||
|
// AbsMode - immediate value + offset
|
||||||
|
AbsMode Mode = 0x20
|
||||||
|
// IndMode - indirect (imm+src)
|
||||||
|
IndMode Mode = 0x40
|
||||||
|
// MemMode - load from memory
|
||||||
|
MemMode Mode = 0x60
|
||||||
|
// XAddMode - add atomically across processors.
|
||||||
|
XAddMode Mode = 0xc0
|
||||||
|
)
|
||||||
|
|
||||||
|
// Size of load and store operations
|
||||||
|
//
|
||||||
|
// msb lsb
|
||||||
|
// +---+--+---+
|
||||||
|
// |mde|SZ|cls|
|
||||||
|
// +---+--+---+
|
||||||
|
type Size uint8
|
||||||
|
|
||||||
|
const sizeMask OpCode = 0x18
|
||||||
|
|
||||||
|
const (
|
||||||
|
// InvalidSize is returned by getters when invoked
|
||||||
|
// on non load / store OpCodes
|
||||||
|
InvalidSize Size = 0xff
|
||||||
|
// DWord - double word; 64 bits
|
||||||
|
DWord Size = 0x18
|
||||||
|
// Word - word; 32 bits
|
||||||
|
Word Size = 0x00
|
||||||
|
// Half - half-word; 16 bits
|
||||||
|
Half Size = 0x08
|
||||||
|
// Byte - byte; 8 bits
|
||||||
|
Byte Size = 0x10
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sizeof returns the size in bytes.
|
||||||
|
func (s Size) Sizeof() int {
|
||||||
|
switch s {
|
||||||
|
case DWord:
|
||||||
|
return 8
|
||||||
|
case Word:
|
||||||
|
return 4
|
||||||
|
case Half:
|
||||||
|
return 2
|
||||||
|
case Byte:
|
||||||
|
return 1
|
||||||
|
default:
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadMemOp returns the OpCode to load a value of given size from memory.
|
||||||
|
func LoadMemOp(size Size) OpCode {
|
||||||
|
return OpCode(LdXClass).SetMode(MemMode).SetSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadMem emits `dst = *(size *)(src + offset)`.
|
||||||
|
func LoadMem(dst, src Register, offset int16, size Size) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: LoadMemOp(size),
|
||||||
|
Dst: dst,
|
||||||
|
Src: src,
|
||||||
|
Offset: offset,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadImmOp returns the OpCode to load an immediate of given size.
|
||||||
|
//
|
||||||
|
// As of kernel 4.20, only DWord size is accepted.
|
||||||
|
func LoadImmOp(size Size) OpCode {
|
||||||
|
return OpCode(LdClass).SetMode(ImmMode).SetSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadImm emits `dst = (size)value`.
|
||||||
|
//
|
||||||
|
// As of kernel 4.20, only DWord size is accepted.
|
||||||
|
func LoadImm(dst Register, value int64, size Size) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: LoadImmOp(size),
|
||||||
|
Dst: dst,
|
||||||
|
Constant: value,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadMapPtr stores a pointer to a map in dst.
|
||||||
|
func LoadMapPtr(dst Register, fd int) Instruction {
|
||||||
|
if fd < 0 {
|
||||||
|
return Instruction{OpCode: InvalidOpCode}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Instruction{
|
||||||
|
OpCode: LoadImmOp(DWord),
|
||||||
|
Dst: dst,
|
||||||
|
Src: PseudoMapFD,
|
||||||
|
Constant: int64(fd),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadMapValue stores a pointer to the value at a certain offset of a map.
|
||||||
|
func LoadMapValue(dst Register, fd int, offset uint32) Instruction {
|
||||||
|
if fd < 0 {
|
||||||
|
return Instruction{OpCode: InvalidOpCode}
|
||||||
|
}
|
||||||
|
|
||||||
|
fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd))
|
||||||
|
return Instruction{
|
||||||
|
OpCode: LoadImmOp(DWord),
|
||||||
|
Dst: dst,
|
||||||
|
Src: PseudoMapValue,
|
||||||
|
Constant: int64(fdAndOffset),
|
||||||
|
}
|
||||||
|
}
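The fd and offset share the 64-bit constant: the low 32 bits hold the fd and the high 32 bits the offset, which is what RewriteMapOffset later manipulates. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// fd 3 and offset 8 pack into a single 64-bit constant:
	// low 32 bits = fd, high 32 bits = offset.
	ins := asm.LoadMapValue(asm.R1, 3, 8)
	fmt.Printf("%#x\n", uint64(ins.Constant)) // 0x800000003
}
```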
|
||||||
|
|
||||||
|
// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff.
|
||||||
|
func LoadIndOp(size Size) OpCode {
|
||||||
|
return OpCode(LdClass).SetMode(IndMode).SetSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`.
|
||||||
|
func LoadInd(dst, src Register, offset int32, size Size) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: LoadIndOp(size),
|
||||||
|
Dst: dst,
|
||||||
|
Src: src,
|
||||||
|
Constant: int64(offset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff.
|
||||||
|
func LoadAbsOp(size Size) OpCode {
|
||||||
|
return OpCode(LdClass).SetMode(AbsMode).SetSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`.
|
||||||
|
func LoadAbs(offset int32, size Size) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: LoadAbsOp(size),
|
||||||
|
Dst: R0,
|
||||||
|
Constant: int64(offset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreMemOp returns the OpCode for storing a register of given size in memory.
|
||||||
|
func StoreMemOp(size Size) OpCode {
|
||||||
|
return OpCode(StXClass).SetMode(MemMode).SetSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreMem emits `*(size *)(dst + offset) = src`
|
||||||
|
func StoreMem(dst Register, offset int16, src Register, size Size) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: StoreMemOp(size),
|
||||||
|
Dst: dst,
|
||||||
|
Src: src,
|
||||||
|
Offset: offset,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreImmOp returns the OpCode for storing an immediate of given size in memory.
|
||||||
|
func StoreImmOp(size Size) OpCode {
|
||||||
|
return OpCode(StClass).SetMode(MemMode).SetSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreImm emits `*(size *)(dst + offset) = value`.
|
||||||
|
func StoreImm(dst Register, offset int16, value int64, size Size) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: StoreImmOp(size),
|
||||||
|
Dst: dst,
|
||||||
|
Offset: offset,
|
||||||
|
Constant: value,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreXAddOp returns the OpCode to atomically add a register to a value in memory.
|
||||||
|
func StoreXAddOp(size Size) OpCode {
|
||||||
|
return OpCode(StXClass).SetMode(XAddMode).SetSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreXAdd atomically adds src to *dst.
|
||||||
|
func StoreXAdd(dst, src Register, size Size) Instruction {
|
||||||
|
return Instruction{
|
||||||
|
OpCode: StoreXAddOp(size),
|
||||||
|
Dst: dst,
|
||||||
|
Src: src,
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,80 @@
|
||||||
|
// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package asm
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[InvalidMode-255]
|
||||||
|
_ = x[ImmMode-0]
|
||||||
|
_ = x[AbsMode-32]
|
||||||
|
_ = x[IndMode-64]
|
||||||
|
_ = x[MemMode-96]
|
||||||
|
_ = x[XAddMode-192]
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
_Mode_name_0 = "ImmMode"
|
||||||
|
_Mode_name_1 = "AbsMode"
|
||||||
|
_Mode_name_2 = "IndMode"
|
||||||
|
_Mode_name_3 = "MemMode"
|
||||||
|
_Mode_name_4 = "XAddMode"
|
||||||
|
_Mode_name_5 = "InvalidMode"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (i Mode) String() string {
|
||||||
|
switch {
|
||||||
|
case i == 0:
|
||||||
|
return _Mode_name_0
|
||||||
|
case i == 32:
|
||||||
|
return _Mode_name_1
|
||||||
|
case i == 64:
|
||||||
|
return _Mode_name_2
|
||||||
|
case i == 96:
|
||||||
|
return _Mode_name_3
|
||||||
|
case i == 192:
|
||||||
|
return _Mode_name_4
|
||||||
|
case i == 255:
|
||||||
|
return _Mode_name_5
|
||||||
|
default:
|
||||||
|
return "Mode(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[InvalidSize-255]
|
||||||
|
_ = x[DWord-24]
|
||||||
|
_ = x[Word-0]
|
||||||
|
_ = x[Half-8]
|
||||||
|
_ = x[Byte-16]
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
_Size_name_0 = "Word"
|
||||||
|
_Size_name_1 = "Half"
|
||||||
|
_Size_name_2 = "Byte"
|
||||||
|
_Size_name_3 = "DWord"
|
||||||
|
_Size_name_4 = "InvalidSize"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (i Size) String() string {
|
||||||
|
switch {
|
||||||
|
case i == 0:
|
||||||
|
return _Size_name_0
|
||||||
|
case i == 8:
|
||||||
|
return _Size_name_1
|
||||||
|
case i == 16:
|
||||||
|
return _Size_name_2
|
||||||
|
case i == 24:
|
||||||
|
return _Size_name_3
|
||||||
|
case i == 255:
|
||||||
|
return _Size_name_4
|
||||||
|
default:
|
||||||
|
return "Size(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,237 @@
|
||||||
|
package asm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate stringer -output opcode_string.go -type=Class
|
||||||
|
|
||||||
|
type encoding int
|
||||||
|
|
||||||
|
const (
|
||||||
|
unknownEncoding encoding = iota
|
||||||
|
loadOrStore
|
||||||
|
jumpOrALU
|
||||||
|
)
|
||||||
|
|
||||||
|
// Class of operations
|
||||||
|
//
|
||||||
|
// msb lsb
|
||||||
|
// +---+--+---+
|
||||||
|
// | ?? |CLS|
|
||||||
|
// +---+--+---+
|
||||||
|
type Class uint8
|
||||||
|
|
||||||
|
const classMask OpCode = 0x07
|
||||||
|
|
||||||
|
const (
|
||||||
|
// LdClass load memory
|
||||||
|
LdClass Class = 0x00
|
||||||
|
// LdXClass load memory from constant
|
||||||
|
LdXClass Class = 0x01
|
||||||
|
// StClass load register from memory
|
||||||
|
StClass Class = 0x02
|
||||||
|
// StXClass load register from constant
|
||||||
|
StXClass Class = 0x03
|
||||||
|
// ALUClass arithmetic operators
|
||||||
|
ALUClass Class = 0x04
|
||||||
|
// JumpClass jump operators
|
||||||
|
JumpClass Class = 0x05
|
||||||
|
// ALU64Class arithmetic in 64 bit mode
|
||||||
|
ALU64Class Class = 0x07
|
||||||
|
)
|
||||||
|
|
||||||
|
func (cls Class) encoding() encoding {
|
||||||
|
switch cls {
|
||||||
|
case LdClass, LdXClass, StClass, StXClass:
|
||||||
|
return loadOrStore
|
||||||
|
case ALU64Class, ALUClass, JumpClass:
|
||||||
|
return jumpOrALU
|
||||||
|
default:
|
||||||
|
return unknownEncoding
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpCode is a packed eBPF opcode.
|
||||||
|
//
|
||||||
|
// Its encoding is defined by a Class value:
|
||||||
|
//
|
||||||
|
// msb lsb
|
||||||
|
// +----+-+---+
|
||||||
|
// | ???? |CLS|
|
||||||
|
// +----+-+---+
|
||||||
|
type OpCode uint8
|
||||||
|
|
||||||
|
// InvalidOpCode is returned by setters on OpCode
const InvalidOpCode OpCode = 0xff

// rawInstructions returns the number of BPF instructions required
// to encode this opcode.
func (op OpCode) rawInstructions() int {
	if op.isDWordLoad() {
		return 2
	}
	return 1
}

func (op OpCode) isDWordLoad() bool {
	return op == LoadImmOp(DWord)
}

// Class returns the class of operation.
func (op OpCode) Class() Class {
	return Class(op & classMask)
}

// Mode returns the mode for load and store operations.
func (op OpCode) Mode() Mode {
	if op.Class().encoding() != loadOrStore {
		return InvalidMode
	}
	return Mode(op & modeMask)
}

// Size returns the size for load and store operations.
func (op OpCode) Size() Size {
	if op.Class().encoding() != loadOrStore {
		return InvalidSize
	}
	return Size(op & sizeMask)
}

// Source returns the source for branch and ALU operations.
func (op OpCode) Source() Source {
	if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap {
		return InvalidSource
	}
	return Source(op & sourceMask)
}

// ALUOp returns the ALUOp.
func (op OpCode) ALUOp() ALUOp {
	if op.Class().encoding() != jumpOrALU {
		return InvalidALUOp
	}
	return ALUOp(op & aluMask)
}

// Endianness returns the Endianness for a byte swap instruction.
func (op OpCode) Endianness() Endianness {
	if op.ALUOp() != Swap {
		return InvalidEndian
	}
	return Endianness(op & endianMask)
}

// JumpOp returns the JumpOp.
func (op OpCode) JumpOp() JumpOp {
	if op.Class().encoding() != jumpOrALU {
		return InvalidJumpOp
	}
	return JumpOp(op & jumpMask)
}

// SetMode sets the mode on load and store operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetMode(mode Mode) OpCode {
	if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) {
		return InvalidOpCode
	}
	return (op & ^modeMask) | OpCode(mode)
}

// SetSize sets the size on load and store operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSize(size Size) OpCode {
	if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) {
		return InvalidOpCode
	}
	return (op & ^sizeMask) | OpCode(size)
}

// SetSource sets the source on jump and ALU operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSource(source Source) OpCode {
	if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) {
		return InvalidOpCode
	}
	return (op & ^sourceMask) | OpCode(source)
}

// SetALUOp sets the ALUOp on ALU operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetALUOp(alu ALUOp) OpCode {
	class := op.Class()
	if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) {
		return InvalidOpCode
	}
	return (op & ^aluMask) | OpCode(alu)
}

// SetJumpOp sets the JumpOp on jump operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
	if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) {
		return InvalidOpCode
	}
	return (op & ^jumpMask) | OpCode(jump)
}

func (op OpCode) String() string {
	var f strings.Builder

	switch class := op.Class(); class {
	case LdClass, LdXClass, StClass, StXClass:
		f.WriteString(strings.TrimSuffix(class.String(), "Class"))

		mode := op.Mode()
		f.WriteString(strings.TrimSuffix(mode.String(), "Mode"))

		switch op.Size() {
		case DWord:
			f.WriteString("DW")
		case Word:
			f.WriteString("W")
		case Half:
			f.WriteString("H")
		case Byte:
			f.WriteString("B")
		}

	case ALU64Class, ALUClass:
		f.WriteString(op.ALUOp().String())

		if op.ALUOp() == Swap {
			// Width for Endian is controlled by Constant
			f.WriteString(op.Endianness().String())
		} else {
			if class == ALUClass {
				f.WriteString("32")
			}

			f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
		}

	case JumpClass:
		f.WriteString(op.JumpOp().String())
		if jop := op.JumpOp(); jop != Exit && jop != Call {
			f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
		}

	default:
		fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
	}

	return f.String()
}

// valid returns true if all bits in value are covered by mask.
func valid(value, mask OpCode) bool {
	return value & ^mask == 0
}
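For orientation, here is a small sketch (not part of this diff) of how the accessors and setters above compose. It assumes the asm package's exported LdXClass, MemMode and Word constants, which are defined elsewhere in the package, not in this hunk.

	// Build a load-from-memory opcode for a 32-bit word and inspect it.
	op := asm.OpCode(asm.LdXClass).SetMode(asm.MemMode).SetSize(asm.Word)
	if op == asm.InvalidOpCode {
		// SetMode/SetSize return InvalidOpCode when called on the wrong class.
		panic("unexpected class")
	}
	fmt.Println(op) // prints "LdXMemW" via the String method above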
@ -0,0 +1,38 @@
// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT.

package asm

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[LdClass-0]
	_ = x[LdXClass-1]
	_ = x[StClass-2]
	_ = x[StXClass-3]
	_ = x[ALUClass-4]
	_ = x[JumpClass-5]
	_ = x[ALU64Class-7]
}

const (
	_Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass"
	_Class_name_1 = "ALU64Class"
)

var (
	_Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47}
)

func (i Class) String() string {
	switch {
	case 0 <= i && i <= 5:
		return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]]
	case i == 7:
		return _Class_name_1
	default:
		return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
@ -0,0 +1,49 @@
package asm

import (
	"fmt"
)

// Register is the source or destination of most operations.
type Register uint8

// R0 contains return values.
const R0 Register = 0

// Registers for function arguments.
const (
	R1 Register = R0 + 1 + iota
	R2
	R3
	R4
	R5
)

// Callee saved registers preserved by function calls.
const (
	R6 Register = R5 + 1 + iota
	R7
	R8
	R9
)

// Read-only frame pointer to access stack.
const (
	R10 Register = R9 + 1
	RFP          = R10
)

// Pseudo registers used by 64bit loads and jumps
const (
	PseudoMapFD    = R1 // BPF_PSEUDO_MAP_FD
	PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
	PseudoCall     = R1 // BPF_PSEUDO_CALL
)

func (r Register) String() string {
	v := uint8(r)
	if v == 10 {
		return "rfp"
	}
	return fmt.Sprintf("r%d", v)
}
@ -0,0 +1,589 @@
|
||||||
|
package ebpf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/asm"
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
"github.com/cilium/ebpf/internal/btf"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CollectionOptions control loading a collection into the kernel.
|
||||||
|
//
|
||||||
|
// Maps and Programs are passed to NewMapWithOptions and NewProgramWithOptions.
|
||||||
|
type CollectionOptions struct {
|
||||||
|
Maps MapOptions
|
||||||
|
Programs ProgramOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// CollectionSpec describes a collection.
|
||||||
|
type CollectionSpec struct {
|
||||||
|
Maps map[string]*MapSpec
|
||||||
|
Programs map[string]*ProgramSpec
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy returns a recursive copy of the spec.
|
||||||
|
func (cs *CollectionSpec) Copy() *CollectionSpec {
|
||||||
|
if cs == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cpy := CollectionSpec{
|
||||||
|
Maps: make(map[string]*MapSpec, len(cs.Maps)),
|
||||||
|
Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, spec := range cs.Maps {
|
||||||
|
cpy.Maps[name] = spec.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, spec := range cs.Programs {
|
||||||
|
cpy.Programs[name] = spec.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// RewriteMaps replaces all references to specific maps.
|
||||||
|
//
|
||||||
|
// Use this function to use pre-existing maps instead of creating new ones
|
||||||
|
// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
|
||||||
|
//
|
||||||
|
// Returns an error if a named map isn't used in at least one program.
|
||||||
|
func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
|
||||||
|
for symbol, m := range maps {
|
||||||
|
// have we seen a program that uses this symbol / map
|
||||||
|
seen := false
|
||||||
|
fd := m.FD()
|
||||||
|
for progName, progSpec := range cs.Programs {
|
||||||
|
err := progSpec.Instructions.RewriteMapPtr(symbol, fd)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
seen = true
|
||||||
|
|
||||||
|
case asm.IsUnreferencedSymbol(err):
|
||||||
|
// Not all programs need to use the map
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("program %s: %w", progName, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !seen {
|
||||||
|
return fmt.Errorf("map %s not referenced by any programs", symbol)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prevent NewCollection from creating rewritten maps
|
||||||
|
delete(cs.Maps, symbol)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
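As a rough usage sketch (the map name and variable are assumptions, not taken from this diff): an already-open *ebpf.Map can be substituted for a map named in the spec before NewCollection is called.

	// eventsMap is an *ebpf.Map obtained elsewhere, e.g. from a pinned path.
	err := spec.RewriteMaps(map[string]*ebpf.Map{
		"events": eventsMap,
	})
	if err != nil {
		return err
	}
	// "events" is now removed from spec.Maps and won't be created again.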
|
||||||
|
|
||||||
|
// RewriteConstants replaces the value of multiple constants.
|
||||||
|
//
|
||||||
|
// The constant must be defined like so in the C program:
|
||||||
|
//
|
||||||
|
// static volatile const type foobar;
|
||||||
|
// static volatile const type foobar = default;
|
||||||
|
//
|
||||||
|
// Replacement values must be of the same length as the C sizeof(type).
|
||||||
|
// If necessary, they are marshalled according to the same rules as
|
||||||
|
// map values.
|
||||||
|
//
|
||||||
|
// From Linux 5.5 the verifier will use constants to eliminate dead code.
|
||||||
|
//
|
||||||
|
// Returns an error if a constant doesn't exist.
|
||||||
|
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
|
||||||
|
rodata := cs.Maps[".rodata"]
|
||||||
|
if rodata == nil {
|
||||||
|
return errors.New("missing .rodata section")
|
||||||
|
}
|
||||||
|
|
||||||
|
if rodata.BTF == nil {
|
||||||
|
return errors.New(".rodata section has no BTF")
|
||||||
|
}
|
||||||
|
|
||||||
|
if n := len(rodata.Contents); n != 1 {
|
||||||
|
return fmt.Errorf("expected one key in .rodata, found %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
kv := rodata.Contents[0]
|
||||||
|
value, ok := kv.Value.([]byte)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, len(value))
|
||||||
|
copy(buf, value)
|
||||||
|
|
||||||
|
err := patchValue(buf, btf.MapValue(rodata.BTF), consts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rodata.Contents[0] = MapKV{kv.Key, buf}
|
||||||
|
return nil
|
||||||
|
}
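A minimal sketch of the intended call pattern, assuming a C-side declaration like `static volatile const __u32 debug;` (the constant name is illustrative, not part of this diff):

	err := spec.RewriteConstants(map[string]interface{}{
		"debug": uint32(1), // replacement must match sizeof(__u32) on the C side
	})
	if err != nil {
		return err
	}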
|
||||||
|
|
||||||
|
// Assign the contents of a CollectionSpec to a struct.
|
||||||
|
//
|
||||||
|
// This function is a short-cut to manually checking the presence
|
||||||
|
// of maps and programs in a collection spec. Consider using bpf2go if this
|
||||||
|
// sounds useful.
|
||||||
|
//
|
||||||
|
// The argument to must be a pointer to a struct. A field of the
|
||||||
|
// struct is updated with values from Programs or Maps if it
|
||||||
|
// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec.
|
||||||
|
// The tag gives the name of the program or map as found in
|
||||||
|
// the CollectionSpec.
|
||||||
|
//
|
||||||
|
// struct {
|
||||||
|
// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
|
||||||
|
// Bar *ebpf.MapSpec `ebpf:"bar_map"`
|
||||||
|
// Ignored int
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Returns an error if any of the fields can't be found, or
|
||||||
|
// if the same map or program is assigned multiple times.
|
||||||
|
func (cs *CollectionSpec) Assign(to interface{}) error {
|
||||||
|
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
|
||||||
|
switch typ {
|
||||||
|
case reflect.TypeOf((*ProgramSpec)(nil)):
|
||||||
|
p := cs.Programs[name]
|
||||||
|
if p == nil {
|
||||||
|
return reflect.Value{}, fmt.Errorf("missing program %q", name)
|
||||||
|
}
|
||||||
|
return reflect.ValueOf(p), nil
|
||||||
|
case reflect.TypeOf((*MapSpec)(nil)):
|
||||||
|
m := cs.Maps[name]
|
||||||
|
if m == nil {
|
||||||
|
return reflect.Value{}, fmt.Errorf("missing map %q", name)
|
||||||
|
}
|
||||||
|
return reflect.ValueOf(m), nil
|
||||||
|
default:
|
||||||
|
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return assignValues(to, valueOf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAndAssign loads maps and programs into the kernel and assigns them to a struct.
|
||||||
|
//
|
||||||
|
// This function is a short-cut to manually checking the presence
|
||||||
|
// of maps and programs in a collection spec. Consider using bpf2go if this
|
||||||
|
// sounds useful.
|
||||||
|
//
|
||||||
|
// The argument to must be a pointer to a struct. A field of the
|
||||||
|
// struct is updated with values from Programs or Maps if it
|
||||||
|
// has an `ebpf` tag and its type is *Program or *Map.
|
||||||
|
// The tag gives the name of the program or map as found in
|
||||||
|
// the CollectionSpec.
|
||||||
|
//
|
||||||
|
// struct {
|
||||||
|
// Foo *ebpf.Program `ebpf:"xdp_foo"`
|
||||||
|
// Bar *ebpf.Map `ebpf:"bar_map"`
|
||||||
|
// Ignored int
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// opts may be nil.
|
||||||
|
//
|
||||||
|
// Returns an error if any of the fields can't be found, or
|
||||||
|
// if the same map or program is assigned multiple times.
|
||||||
|
func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
|
||||||
|
if opts == nil {
|
||||||
|
opts = &CollectionOptions{}
|
||||||
|
}
|
||||||
|
|
||||||
|
loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
|
||||||
|
switch typ {
|
||||||
|
case reflect.TypeOf((*Program)(nil)):
|
||||||
|
p, err := loadProgram(name)
|
||||||
|
if err != nil {
|
||||||
|
return reflect.Value{}, err
|
||||||
|
}
|
||||||
|
return reflect.ValueOf(p), nil
|
||||||
|
case reflect.TypeOf((*Map)(nil)):
|
||||||
|
m, err := loadMap(name)
|
||||||
|
if err != nil {
|
||||||
|
return reflect.Value{}, err
|
||||||
|
}
|
||||||
|
return reflect.ValueOf(m), nil
|
||||||
|
default:
|
||||||
|
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := assignValues(to, valueOf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
done()
|
||||||
|
return nil
|
||||||
|
}
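For reference, a hedged example of driving LoadAndAssign with the struct-tag convention documented above; the program and map names are placeholders.

	var objs struct {
		Foo *ebpf.Program `ebpf:"xdp_foo"`
		Bar *ebpf.Map     `ebpf:"bar_map"`
	}
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		return err
	}
	defer objs.Foo.Close()
	defer objs.Bar.Close()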
|
||||||
|
|
||||||
|
// Collection is a collection of Programs and Maps associated
|
||||||
|
// with their symbols
|
||||||
|
type Collection struct {
|
||||||
|
Programs map[string]*Program
|
||||||
|
Maps map[string]*Map
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCollection creates a Collection from a specification.
|
||||||
|
func NewCollection(spec *CollectionSpec) (*Collection, error) {
|
||||||
|
return NewCollectionWithOptions(spec, CollectionOptions{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCollectionWithOptions creates a Collection from a specification.
|
||||||
|
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
|
||||||
|
loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
for mapName := range spec.Maps {
|
||||||
|
_, err := loadMap(mapName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for progName := range spec.Programs {
|
||||||
|
_, err := loadProgram(progName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
maps, progs := done()
|
||||||
|
return &Collection{
|
||||||
|
progs,
|
||||||
|
maps,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfHandleCache map[*btf.Spec]*btf.Handle
|
||||||
|
|
||||||
|
func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
|
||||||
|
if btfs[spec] != nil {
|
||||||
|
return btfs[spec], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
handle, err := btf.NewHandle(spec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
btfs[spec] = handle
|
||||||
|
return handle, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (btfs btfHandleCache) close() {
|
||||||
|
for _, handle := range btfs {
|
||||||
|
handle.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
|
||||||
|
loadMap func(string) (*Map, error),
|
||||||
|
loadProgram func(string) (*Program, error),
|
||||||
|
done func() (map[string]*Map, map[string]*Program),
|
||||||
|
cleanup func(),
|
||||||
|
) {
|
||||||
|
var (
|
||||||
|
maps = make(map[string]*Map)
|
||||||
|
progs = make(map[string]*Program)
|
||||||
|
btfs = make(btfHandleCache)
|
||||||
|
skipMapsAndProgs = false
|
||||||
|
)
|
||||||
|
|
||||||
|
cleanup = func() {
|
||||||
|
btfs.close()
|
||||||
|
|
||||||
|
if skipMapsAndProgs {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range maps {
|
||||||
|
m.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range progs {
|
||||||
|
p.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
done = func() (map[string]*Map, map[string]*Program) {
|
||||||
|
skipMapsAndProgs = true
|
||||||
|
return maps, progs
|
||||||
|
}
|
||||||
|
|
||||||
|
loadMap = func(mapName string) (*Map, error) {
|
||||||
|
if m := maps[mapName]; m != nil {
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
mapSpec := coll.Maps[mapName]
|
||||||
|
if mapSpec == nil {
|
||||||
|
return nil, fmt.Errorf("missing map %s", mapName)
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := newMapWithOptions(mapSpec, opts.Maps, btfs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("map %s: %w", mapName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
maps[mapName] = m
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
loadProgram = func(progName string) (*Program, error) {
|
||||||
|
if prog := progs[progName]; prog != nil {
|
||||||
|
return prog, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
progSpec := coll.Programs[progName]
|
||||||
|
if progSpec == nil {
|
||||||
|
return nil, fmt.Errorf("unknown program %s", progName)
|
||||||
|
}
|
||||||
|
|
||||||
|
progSpec = progSpec.Copy()
|
||||||
|
|
||||||
|
// Rewrite any reference to a valid map.
|
||||||
|
for i := range progSpec.Instructions {
|
||||||
|
ins := &progSpec.Instructions[i]
|
||||||
|
|
||||||
|
if ins.OpCode != asm.LoadImmOp(asm.DWord) || ins.Reference == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint32(ins.Constant) != math.MaxUint32 {
|
||||||
|
// Don't overwrite maps already rewritten, users can
|
||||||
|
// rewrite programs in the spec themselves
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := loadMap(ins.Reference)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("program %s: %s", progName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fd := m.FD()
|
||||||
|
if fd < 0 {
|
||||||
|
return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
|
||||||
|
}
|
||||||
|
if err := ins.RewriteMapPtr(m.FD()); err != nil {
|
||||||
|
return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
prog, err := newProgramWithOptions(progSpec, opts.Programs, btfs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("program %s: %w", progName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
progs[progName] = prog
|
||||||
|
return prog, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadCollection parses an object file and converts it to a collection.
|
||||||
|
func LoadCollection(file string) (*Collection, error) {
|
||||||
|
spec, err := LoadCollectionSpec(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return NewCollection(spec)
|
||||||
|
}
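A short sketch of the end-to-end flow; the object file path and program name are assumptions for illustration.

	coll, err := ebpf.LoadCollection("bpf/xdp.o")
	if err != nil {
		return err
	}
	defer coll.Close()

	prog := coll.Programs["xdp_foo"] // nil if the program isn't in the object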
|
||||||
|
|
||||||
|
// Close frees all maps and programs associated with the collection.
|
||||||
|
//
|
||||||
|
// The collection mustn't be used afterwards.
|
||||||
|
func (coll *Collection) Close() {
|
||||||
|
for _, prog := range coll.Programs {
|
||||||
|
prog.Close()
|
||||||
|
}
|
||||||
|
for _, m := range coll.Maps {
|
||||||
|
m.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DetachMap removes the named map from the Collection.
|
||||||
|
//
|
||||||
|
// This means that a later call to Close() will not affect this map.
|
||||||
|
//
|
||||||
|
// Returns nil if no map of that name exists.
|
||||||
|
func (coll *Collection) DetachMap(name string) *Map {
|
||||||
|
m := coll.Maps[name]
|
||||||
|
delete(coll.Maps, name)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// DetachProgram removes the named program from the Collection.
|
||||||
|
//
|
||||||
|
// This means that a later call to Close() will not affect this program.
|
||||||
|
//
|
||||||
|
// Returns nil if no program of that name exists.
|
||||||
|
func (coll *Collection) DetachProgram(name string) *Program {
|
||||||
|
p := coll.Programs[name]
|
||||||
|
delete(coll.Programs, name)
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assign the contents of a collection to a struct.
|
||||||
|
//
|
||||||
|
// Deprecated: use CollectionSpec.Assign instead. It provides the same
|
||||||
|
// functionality but creates only the maps and programs requested.
|
||||||
|
func (coll *Collection) Assign(to interface{}) error {
|
||||||
|
assignedMaps := make(map[string]struct{})
|
||||||
|
assignedPrograms := make(map[string]struct{})
|
||||||
|
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
|
||||||
|
switch typ {
|
||||||
|
case reflect.TypeOf((*Program)(nil)):
|
||||||
|
p := coll.Programs[name]
|
||||||
|
if p == nil {
|
||||||
|
return reflect.Value{}, fmt.Errorf("missing program %q", name)
|
||||||
|
}
|
||||||
|
assignedPrograms[name] = struct{}{}
|
||||||
|
return reflect.ValueOf(p), nil
|
||||||
|
case reflect.TypeOf((*Map)(nil)):
|
||||||
|
m := coll.Maps[name]
|
||||||
|
if m == nil {
|
||||||
|
return reflect.Value{}, fmt.Errorf("missing map %q", name)
|
||||||
|
}
|
||||||
|
assignedMaps[name] = struct{}{}
|
||||||
|
return reflect.ValueOf(m), nil
|
||||||
|
default:
|
||||||
|
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := assignValues(to, valueOf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for name := range assignedPrograms {
|
||||||
|
coll.DetachProgram(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for name := range assignedMaps {
|
||||||
|
coll.DetachMap(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error {
|
||||||
|
type structField struct {
|
||||||
|
reflect.StructField
|
||||||
|
value reflect.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
fields []structField
|
||||||
|
visitedTypes = make(map[reflect.Type]bool)
|
||||||
|
flattenStruct func(reflect.Value) error
|
||||||
|
)
|
||||||
|
|
||||||
|
flattenStruct = func(structVal reflect.Value) error {
|
||||||
|
structType := structVal.Type()
|
||||||
|
if structType.Kind() != reflect.Struct {
|
||||||
|
return fmt.Errorf("%s is not a struct", structType)
|
||||||
|
}
|
||||||
|
|
||||||
|
if visitedTypes[structType] {
|
||||||
|
return fmt.Errorf("recursion on type %s", structType)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < structType.NumField(); i++ {
|
||||||
|
field := structField{structType.Field(i), structVal.Field(i)}
|
||||||
|
|
||||||
|
name := field.Tag.Get("ebpf")
|
||||||
|
if name != "" {
|
||||||
|
fields = append(fields, field)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
switch field.Type.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
if field.Type.Elem().Kind() != reflect.Struct {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if field.value.IsNil() {
|
||||||
|
return fmt.Errorf("nil pointer to %s", structType)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = flattenStruct(field.value.Elem())
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
err = flattenStruct(field.value)
|
||||||
|
|
||||||
|
default:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("field %s: %s", field.Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
toValue := reflect.ValueOf(to)
|
||||||
|
if toValue.Type().Kind() != reflect.Ptr {
|
||||||
|
return fmt.Errorf("%T is not a pointer to struct", to)
|
||||||
|
}
|
||||||
|
|
||||||
|
if toValue.IsNil() {
|
||||||
|
return fmt.Errorf("nil pointer to %T", to)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := flattenStruct(toValue.Elem()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type elem struct {
|
||||||
|
// Either *Map or *Program
|
||||||
|
typ reflect.Type
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
assignedTo := make(map[elem]string)
|
||||||
|
for _, field := range fields {
|
||||||
|
name := field.Tag.Get("ebpf")
|
||||||
|
if strings.Contains(name, ",") {
|
||||||
|
return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
e := elem{field.Type, name}
|
||||||
|
if assignedField := assignedTo[e]; assignedField != "" {
|
||||||
|
return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := valueOf(field.Type, name)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("field %s: %w", field.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !field.value.CanSet() {
|
||||||
|
return fmt.Errorf("field %s: can't set value", field.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
field.value.Set(value)
|
||||||
|
assignedTo[e] = field.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,16 @@
// Package ebpf is a toolkit for working with eBPF programs.
//
// eBPF programs are small snippets of code which are executed directly
// in a VM in the Linux kernel, which makes them very fast and flexible.
// Many Linux subsystems now accept eBPF programs. This makes it possible
// to implement highly application specific logic inside the kernel,
// without having to modify the actual kernel itself.
//
// This package is designed for long-running processes which
// want to use eBPF to implement part of their application logic. It has no
// run-time dependencies outside of the library and the Linux kernel itself.
// eBPF code should be compiled ahead of time using clang, and shipped with
// your application as any other resource.
//
// Use the link subpackage to attach a loaded program to a hook in the kernel.
package ebpf
@ -0,0 +1,930 @@
|
||||||
|
package ebpf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"debug/elf"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/asm"
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
"github.com/cilium/ebpf/internal/btf"
|
||||||
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// elfCode is a convenience to reduce the amount of arguments that have to
|
||||||
|
// be passed around explicitly. You should treat its contents as immutable.
|
||||||
|
type elfCode struct {
|
||||||
|
*internal.SafeELFFile
|
||||||
|
sections map[elf.SectionIndex]*elfSection
|
||||||
|
license string
|
||||||
|
version uint32
|
||||||
|
btf *btf.Spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadCollectionSpec parses an ELF file into a CollectionSpec.
|
||||||
|
func LoadCollectionSpec(file string) (*CollectionSpec, error) {
|
||||||
|
f, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
spec, err := LoadCollectionSpecFromReader(f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("file %s: %w", file, err)
|
||||||
|
}
|
||||||
|
return spec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
|
||||||
|
func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
|
||||||
|
f, err := internal.NewSafeELFFile(rd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
var (
|
||||||
|
licenseSection *elf.Section
|
||||||
|
versionSection *elf.Section
|
||||||
|
sections = make(map[elf.SectionIndex]*elfSection)
|
||||||
|
relSections = make(map[elf.SectionIndex]*elf.Section)
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is the target of relocations generated by inline assembly.
|
||||||
|
sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection)
|
||||||
|
|
||||||
|
// Collect all the sections we're interested in. This includes relocations
|
||||||
|
// which we parse later.
|
||||||
|
for i, sec := range f.Sections {
|
||||||
|
idx := elf.SectionIndex(i)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case strings.HasPrefix(sec.Name, "license"):
|
||||||
|
licenseSection = sec
|
||||||
|
case strings.HasPrefix(sec.Name, "version"):
|
||||||
|
versionSection = sec
|
||||||
|
case strings.HasPrefix(sec.Name, "maps"):
|
||||||
|
sections[idx] = newElfSection(sec, mapSection)
|
||||||
|
case sec.Name == ".maps":
|
||||||
|
sections[idx] = newElfSection(sec, btfMapSection)
|
||||||
|
case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"):
|
||||||
|
sections[idx] = newElfSection(sec, dataSection)
|
||||||
|
case sec.Type == elf.SHT_REL:
|
||||||
|
// Store relocations under the section index of the target
|
||||||
|
relSections[elf.SectionIndex(sec.Info)] = sec
|
||||||
|
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
|
||||||
|
sections[idx] = newElfSection(sec, programSection)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
license, err := loadLicense(licenseSection)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("load license: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
version, err := loadVersion(versionSection, f.ByteOrder)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("load version: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
btfSpec, err := btf.LoadSpecFromReader(rd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("load BTF: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assign symbols to all the sections we're interested in.
|
||||||
|
symbols, err := f.Symbols()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("load symbols: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, symbol := range symbols {
|
||||||
|
idx := symbol.Section
|
||||||
|
symType := elf.ST_TYPE(symbol.Info)
|
||||||
|
|
||||||
|
section := sections[idx]
|
||||||
|
if section == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Older versions of LLVM don't tag symbols correctly, so keep
|
||||||
|
// all NOTYPE ones.
|
||||||
|
keep := symType == elf.STT_NOTYPE
|
||||||
|
switch section.kind {
|
||||||
|
case mapSection, btfMapSection, dataSection:
|
||||||
|
keep = keep || symType == elf.STT_OBJECT
|
||||||
|
case programSection:
|
||||||
|
keep = keep || symType == elf.STT_FUNC
|
||||||
|
}
|
||||||
|
if !keep || symbol.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
section.symbols[symbol.Value] = symbol
|
||||||
|
}
|
||||||
|
|
||||||
|
ec := &elfCode{
|
||||||
|
SafeELFFile: f,
|
||||||
|
sections: sections,
|
||||||
|
license: license,
|
||||||
|
version: version,
|
||||||
|
btf: btfSpec,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go through relocation sections, and parse the ones for sections we're
|
||||||
|
// interested in. Make sure that relocations point at valid sections.
|
||||||
|
for idx, relSection := range relSections {
|
||||||
|
section := sections[idx]
|
||||||
|
if section == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
rels, err := ec.loadRelocations(relSection, symbols)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, rel := range rels {
|
||||||
|
target := sections[rel.Section]
|
||||||
|
if target == nil {
|
||||||
|
return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
if target.Flags&elf.SHF_STRINGS > 0 {
|
||||||
|
return nil, fmt.Errorf("section %q: string %q is not stack allocated: %w", section.Name, rel.Name, ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
target.references++
|
||||||
|
}
|
||||||
|
|
||||||
|
section.relocations = rels
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect all the various ways to define maps.
|
||||||
|
maps := make(map[string]*MapSpec)
|
||||||
|
if err := ec.loadMaps(maps); err != nil {
|
||||||
|
return nil, fmt.Errorf("load maps: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ec.loadBTFMaps(maps); err != nil {
|
||||||
|
return nil, fmt.Errorf("load BTF maps: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ec.loadDataSections(maps); err != nil {
|
||||||
|
return nil, fmt.Errorf("load data sections: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, collect programs and link them.
|
||||||
|
progs, err := ec.loadPrograms()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("load programs: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &CollectionSpec{maps, progs}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadLicense(sec *elf.Section) (string, error) {
|
||||||
|
if sec == nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := sec.Data()
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("section %s: %v", sec.Name, err)
|
||||||
|
}
|
||||||
|
return string(bytes.TrimRight(data, "\000")), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
|
||||||
|
if sec == nil {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var version uint32
|
||||||
|
if err := binary.Read(sec.Open(), bo, &version); err != nil {
|
||||||
|
return 0, fmt.Errorf("section %s: %v", sec.Name, err)
|
||||||
|
}
|
||||||
|
return version, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type elfSectionKind int
|
||||||
|
|
||||||
|
const (
|
||||||
|
undefSection elfSectionKind = iota
|
||||||
|
mapSection
|
||||||
|
btfMapSection
|
||||||
|
programSection
|
||||||
|
dataSection
|
||||||
|
)
|
||||||
|
|
||||||
|
type elfSection struct {
|
||||||
|
*elf.Section
|
||||||
|
kind elfSectionKind
|
||||||
|
// Offset from the start of the section to a symbol
|
||||||
|
symbols map[uint64]elf.Symbol
|
||||||
|
// Offset from the start of the section to a relocation, which points at
|
||||||
|
// a symbol in another section.
|
||||||
|
relocations map[uint64]elf.Symbol
|
||||||
|
// The number of relocations pointing at this section.
|
||||||
|
references int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection {
|
||||||
|
return &elfSection{
|
||||||
|
section,
|
||||||
|
kind,
|
||||||
|
make(map[uint64]elf.Symbol),
|
||||||
|
make(map[uint64]elf.Symbol),
|
||||||
|
0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) {
|
||||||
|
var (
|
||||||
|
progs []*ProgramSpec
|
||||||
|
libs []*ProgramSpec
|
||||||
|
)
|
||||||
|
|
||||||
|
for _, sec := range ec.sections {
|
||||||
|
if sec.kind != programSection {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sec.symbols) == 0 {
|
||||||
|
return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
funcSym, ok := sec.symbols[0]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("section %v: no label at start", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
insns, length, err := ec.loadInstructions(sec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
progType, attachType, attachTo := getProgType(sec.Name)
|
||||||
|
|
||||||
|
spec := &ProgramSpec{
|
||||||
|
Name: funcSym.Name,
|
||||||
|
Type: progType,
|
||||||
|
AttachType: attachType,
|
||||||
|
AttachTo: attachTo,
|
||||||
|
License: ec.license,
|
||||||
|
KernelVersion: ec.version,
|
||||||
|
Instructions: insns,
|
||||||
|
ByteOrder: ec.ByteOrder,
|
||||||
|
}
|
||||||
|
|
||||||
|
if ec.btf != nil {
|
||||||
|
spec.BTF, err = ec.btf.Program(sec.Name, length)
|
||||||
|
if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
|
||||||
|
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if spec.Type == UnspecifiedProgram {
|
||||||
|
// There is no single name we can use for "library" sections,
|
||||||
|
// since they may contain multiple functions. We'll decode the
|
||||||
|
// labels they contain later on, and then link sections that way.
|
||||||
|
libs = append(libs, spec)
|
||||||
|
} else {
|
||||||
|
progs = append(progs, spec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
res := make(map[string]*ProgramSpec, len(progs))
|
||||||
|
for _, prog := range progs {
|
||||||
|
err := link(prog, libs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("program %s: %w", prog.Name, err)
|
||||||
|
}
|
||||||
|
res[prog.Name] = prog
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) {
|
||||||
|
var (
|
||||||
|
r = bufio.NewReader(section.Open())
|
||||||
|
insns asm.Instructions
|
||||||
|
offset uint64
|
||||||
|
)
|
||||||
|
for {
|
||||||
|
var ins asm.Instruction
|
||||||
|
n, err := ins.Unmarshal(r, ec.ByteOrder)
|
||||||
|
if err == io.EOF {
|
||||||
|
return insns, offset, nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, fmt.Errorf("offset %d: %w", offset, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Symbol = section.symbols[offset].Name
|
||||||
|
|
||||||
|
if rel, ok := section.relocations[offset]; ok {
|
||||||
|
if err = ec.relocateInstruction(&ins, rel); err != nil {
|
||||||
|
return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
insns = append(insns, ins)
|
||||||
|
offset += n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
|
||||||
|
var (
|
||||||
|
typ = elf.ST_TYPE(rel.Info)
|
||||||
|
bind = elf.ST_BIND(rel.Info)
|
||||||
|
name = rel.Name
|
||||||
|
)
|
||||||
|
|
||||||
|
target := ec.sections[rel.Section]
|
||||||
|
|
||||||
|
switch target.kind {
|
||||||
|
case mapSection, btfMapSection:
|
||||||
|
if bind != elf.STB_GLOBAL {
|
||||||
|
return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
|
||||||
|
// STT_NOTYPE is generated on clang < 8 which doesn't tag
|
||||||
|
// relocations appropriately.
|
||||||
|
return fmt.Errorf("map load: incorrect relocation type %v", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Src = asm.PseudoMapFD
|
||||||
|
|
||||||
|
// Mark the instruction as needing an update when creating the
|
||||||
|
// collection.
|
||||||
|
if err := ins.RewriteMapPtr(-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
case dataSection:
|
||||||
|
switch typ {
|
||||||
|
case elf.STT_SECTION:
|
||||||
|
if bind != elf.STB_LOCAL {
|
||||||
|
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
case elf.STT_OBJECT:
|
||||||
|
if bind != elf.STB_GLOBAL {
|
||||||
|
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We rely on using the name of the data section as the reference. It
|
||||||
|
// would be nicer to keep the real name in case of an STT_OBJECT, but
|
||||||
|
// it's not clear how to encode that into Instruction.
|
||||||
|
name = target.Name
|
||||||
|
|
||||||
|
// For some reason, clang encodes the offset of the symbol into its
|
||||||
|
// section in the first basic BPF instruction, while the kernel
|
||||||
|
// expects it in the second one.
|
||||||
|
ins.Constant <<= 32
|
||||||
|
ins.Src = asm.PseudoMapValue
|
||||||
|
|
||||||
|
// Mark the instruction as needing an update when creating the
|
||||||
|
// collection.
|
||||||
|
if err := ins.RewriteMapPtr(-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
case programSection:
|
||||||
|
if ins.OpCode.JumpOp() != asm.Call {
|
||||||
|
return fmt.Errorf("not a call instruction: %s", ins)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ins.Src != asm.PseudoCall {
|
||||||
|
return fmt.Errorf("call: %s: incorrect source register", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch typ {
|
||||||
|
case elf.STT_NOTYPE, elf.STT_FUNC:
|
||||||
|
if bind != elf.STB_GLOBAL {
|
||||||
|
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
case elf.STT_SECTION:
|
||||||
|
if bind != elf.STB_LOCAL {
|
||||||
|
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The function we want to call is in the indicated section,
|
||||||
|
// at the offset encoded in the instruction itself. Reverse
|
||||||
|
// the calculation to find the real function we're looking for.
|
||||||
|
// A value of -1 references the first instruction in the section.
|
||||||
|
offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
|
||||||
|
if offset < 0 {
|
||||||
|
return fmt.Errorf("call: %s: invalid offset %d", name, offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
sym, ok := target.symbols[uint64(offset)]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("call: %s: no symbol at offset %d", name, offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Constant = -1
|
||||||
|
name = sym.Name
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
case undefSection:
|
||||||
|
if bind != elf.STB_GLOBAL {
|
||||||
|
return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
if typ != elf.STT_NOTYPE {
|
||||||
|
return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
// There is nothing to do here but set ins.Reference.
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Reference = name
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
|
||||||
|
for _, sec := range ec.sections {
|
||||||
|
if sec.kind != mapSection {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
nSym := len(sec.symbols)
|
||||||
|
if nSym == 0 {
|
||||||
|
return fmt.Errorf("section %v: no symbols", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if sec.Size%uint64(nSym) != 0 {
|
||||||
|
return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
r = bufio.NewReader(sec.Open())
|
||||||
|
size = sec.Size / uint64(nSym)
|
||||||
|
)
|
||||||
|
for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size {
|
||||||
|
mapSym, ok := sec.symbols[offset]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
if maps[mapSym.Name] != nil {
|
||||||
|
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
|
||||||
|
}
|
||||||
|
|
||||||
|
lr := io.LimitReader(r, int64(size))
|
||||||
|
|
||||||
|
spec := MapSpec{
|
||||||
|
Name: SanitizeName(mapSym.Name, -1),
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
|
||||||
|
return fmt.Errorf("map %v: missing type", mapSym)
|
||||||
|
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
|
||||||
|
return fmt.Errorf("map %v: missing key size", mapSym)
|
||||||
|
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
|
||||||
|
return fmt.Errorf("map %v: missing value size", mapSym)
|
||||||
|
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
|
||||||
|
return fmt.Errorf("map %v: missing max entries", mapSym)
|
||||||
|
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
|
||||||
|
return fmt.Errorf("map %v: missing flags", mapSym)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
|
||||||
|
return fmt.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
|
||||||
|
}
|
||||||
|
|
||||||
|
maps[mapSym.Name] = &spec
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
|
||||||
|
for _, sec := range ec.sections {
|
||||||
|
if sec.kind != btfMapSection {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ec.btf == nil {
|
||||||
|
return fmt.Errorf("missing BTF")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sec.symbols) == 0 {
|
||||||
|
return fmt.Errorf("section %v: no symbols", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := io.Copy(internal.DiscardZeroes{}, bufio.NewReader(sec.Open()))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("section %v: initializing BTF map definitions: %w", sec.Name, internal.ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, sym := range sec.symbols {
|
||||||
|
name := sym.Name
|
||||||
|
if maps[name] != nil {
|
||||||
|
return fmt.Errorf("section %v: map %v already exists", sec.Name, sym)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A global Var is created by declaring a struct with a 'structure variable',
|
||||||
|
// as is common in eBPF C to declare eBPF maps. For example,
|
||||||
|
// `struct { ... } map_name ...;` emits a global variable `map_name`
|
||||||
|
// with the type of said struct (which can be anonymous).
|
||||||
|
var v btf.Var
|
||||||
|
if err := ec.btf.FindType(name, &v); err != nil {
|
||||||
|
return fmt.Errorf("cannot find global variable '%s' in BTF: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mapStruct, ok := v.Type.(*btf.Struct)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("expected struct, got %s", v.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
mapSpec, err := mapSpecFromBTF(name, mapStruct, false, ec.btf)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("map %v: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
maps[name] = mapSpec
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing
|
||||||
|
// a BTF map definition. The name and spec arguments will be copied to the
|
||||||
|
// resulting MapSpec, and inner must be true on any recursive invocations.
|
||||||
|
func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*MapSpec, error) {
|
||||||
|
|
||||||
|
var (
|
||||||
|
key, value btf.Type
|
||||||
|
keySize, valueSize uint32
|
||||||
|
mapType, flags, maxEntries uint32
|
||||||
|
pinType PinType
|
||||||
|
innerMapSpec *MapSpec
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
for i, member := range def.Members {
|
||||||
|
switch member.Name {
|
||||||
|
case "type":
|
||||||
|
mapType, err = uintFromBTF(member.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get type: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "map_flags":
|
||||||
|
flags, err = uintFromBTF(member.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get BTF map flags: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "max_entries":
|
||||||
|
maxEntries, err = uintFromBTF(member.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "key":
|
||||||
|
if keySize != 0 {
|
||||||
|
return nil, errors.New("both key and key_size given")
|
||||||
|
}
|
||||||
|
|
||||||
|
pk, ok := member.Type.(*btf.Pointer)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("key type is not a pointer: %T", member.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
key = pk.Target
|
||||||
|
|
||||||
|
size, err := btf.Sizeof(pk.Target)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get size of BTF key: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
keySize = uint32(size)
|
||||||
|
|
||||||
|
case "value":
|
||||||
|
if valueSize != 0 {
|
||||||
|
return nil, errors.New("both value and value_size given")
|
||||||
|
}
|
||||||
|
|
||||||
|
vk, ok := member.Type.(*btf.Pointer)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("value type is not a pointer: %T", member.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
value = vk.Target
|
||||||
|
|
||||||
|
size, err := btf.Sizeof(vk.Target)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get size of BTF value: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
valueSize = uint32(size)
|
||||||
|
|
||||||
|
case "key_size":
|
||||||
|
// Key needs to be nil and keySize needs to be 0 for key_size to be
|
||||||
|
// considered a valid member.
|
||||||
|
if key != nil || keySize != 0 {
|
||||||
|
return nil, errors.New("both key and key_size given")
|
||||||
|
}
|
||||||
|
|
||||||
|
keySize, err = uintFromBTF(member.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get BTF key size: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "value_size":
|
||||||
|
// Value needs to be nil and valueSize needs to be 0 for value_size to be
|
||||||
|
// considered a valid member.
|
||||||
|
if value != nil || valueSize != 0 {
|
||||||
|
return nil, errors.New("both value and value_size given")
|
||||||
|
}
|
||||||
|
|
||||||
|
valueSize, err = uintFromBTF(member.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get BTF value size: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "pinning":
|
||||||
|
if inner {
|
||||||
|
return nil, errors.New("inner maps can't be pinned")
|
||||||
|
}
|
||||||
|
|
||||||
|
pinning, err := uintFromBTF(member.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get pinning: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pinType = PinType(pinning)
|
||||||
|
|
||||||
|
case "values":
|
||||||
|
// The 'values' field in BTF map definitions is used for declaring map
|
||||||
|
// value types that are references to other BPF objects, like other maps
|
||||||
|
// or programs. It is always expected to be an array of pointers.
|
||||||
|
if i != len(def.Members)-1 {
|
||||||
|
return nil, errors.New("'values' must be the last member in a BTF map definition")
|
||||||
|
}
|
||||||
|
|
||||||
|
if valueSize != 0 && valueSize != 4 {
|
||||||
|
return nil, errors.New("value_size must be 0 or 4")
|
||||||
|
}
|
||||||
|
valueSize = 4
|
||||||
|
|
||||||
|
valueType, err := resolveBTFArrayMacro(member.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't resolve type of member 'values': %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t := valueType.(type) {
|
||||||
|
case *btf.Struct:
|
||||||
|
// The values member pointing to an array of structs means we're expecting
|
||||||
|
// a map-in-map declaration.
|
||||||
|
if MapType(mapType) != ArrayOfMaps && MapType(mapType) != HashOfMaps {
|
||||||
|
return nil, errors.New("outer map needs to be an array or a hash of maps")
|
||||||
|
}
|
||||||
|
if inner {
|
||||||
|
return nil, fmt.Errorf("nested inner maps are not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
// This inner map spec is used as a map template, but it needs to be
|
||||||
|
// created as a traditional map before it can be used to do so.
|
||||||
|
// libbpf names the inner map template '<outer_name>.inner', but we
|
||||||
|
// opted for _inner to simplify validation logic. (dots only supported
|
||||||
|
// on kernels 5.2 and up)
|
||||||
|
// Pass the BTF spec from the parent object, since both parent and
|
||||||
|
// child must be created from the same BTF blob (on kernels that support BTF).
|
||||||
|
innerMapSpec, err = mapSpecFromBTF(name+"_inner", t, true, spec)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unsupported value type %q in 'values' field", t)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bm := btf.NewMap(spec, key, value)
|
||||||
|
|
||||||
|
return &MapSpec{
|
||||||
|
Name: SanitizeName(name, -1),
|
||||||
|
Type: MapType(mapType),
|
||||||
|
KeySize: keySize,
|
||||||
|
ValueSize: valueSize,
|
||||||
|
MaxEntries: maxEntries,
|
||||||
|
Flags: flags,
|
||||||
|
BTF: &bm,
|
||||||
|
Pinning: pinType,
|
||||||
|
InnerMap: innerMapSpec,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// uintFromBTF resolves the __uint macro, which is a pointer to a sized
|
||||||
|
// array, e.g. for int (*foo)[10], this function will return 10.
|
||||||
|
func uintFromBTF(typ btf.Type) (uint32, error) {
|
||||||
|
ptr, ok := typ.(*btf.Pointer)
|
||||||
|
if !ok {
|
||||||
|
return 0, fmt.Errorf("not a pointer: %v", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
arr, ok := ptr.Target.(*btf.Array)
|
||||||
|
if !ok {
|
||||||
|
return 0, fmt.Errorf("not a pointer to array: %v", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
return arr.Nelems, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveBTFArrayMacro resolves the __array macro, which declares an array
|
||||||
|
// of pointers to a given type. This function returns the target Type of
|
||||||
|
// the pointers in the array.
|
||||||
|
func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) {
|
||||||
|
arr, ok := typ.(*btf.Array)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("not an array: %v", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
ptr, ok := arr.Type.(*btf.Pointer)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("not an array of pointers: %v", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ptr.Target, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
|
||||||
|
for _, sec := range ec.sections {
|
||||||
|
if sec.kind != dataSection {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if sec.references == 0 {
|
||||||
|
// Prune data sections which are not referenced by any
|
||||||
|
// instructions.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ec.btf == nil {
|
||||||
|
return errors.New("data sections require BTF, make sure all consts are marked as static")
|
||||||
|
}
|
||||||
|
|
||||||
|
btfMap, err := ec.btf.Datasec(sec.Name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := sec.Data()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint64(len(data)) > math.MaxUint32 {
|
||||||
|
return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
mapSpec := &MapSpec{
|
||||||
|
Name: SanitizeName(sec.Name, -1),
|
||||||
|
Type: Array,
|
||||||
|
KeySize: 4,
|
||||||
|
ValueSize: uint32(len(data)),
|
||||||
|
MaxEntries: 1,
|
||||||
|
Contents: []MapKV{{uint32(0), data}},
|
||||||
|
BTF: btfMap,
|
||||||
|
}
|
||||||
|
|
||||||
|
switch sec.Name {
|
||||||
|
case ".rodata":
|
||||||
|
mapSpec.Flags = unix.BPF_F_RDONLY_PROG
|
||||||
|
mapSpec.Freeze = true
|
||||||
|
case ".bss":
|
||||||
|
// The kernel already zero-initializes the map
|
||||||
|
mapSpec.Contents = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
maps[sec.Name] = mapSpec
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProgType(sectionName string) (ProgramType, AttachType, string) {
|
||||||
|
types := map[string]struct {
|
||||||
|
progType ProgramType
|
||||||
|
attachType AttachType
|
||||||
|
}{
|
||||||
|
// From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
|
||||||
|
"socket": {SocketFilter, AttachNone},
|
||||||
|
"seccomp": {SocketFilter, AttachNone},
|
||||||
|
"kprobe/": {Kprobe, AttachNone},
|
||||||
|
"uprobe/": {Kprobe, AttachNone},
|
||||||
|
"kretprobe/": {Kprobe, AttachNone},
|
||||||
|
"uretprobe/": {Kprobe, AttachNone},
|
||||||
|
"tracepoint/": {TracePoint, AttachNone},
|
||||||
|
"raw_tracepoint/": {RawTracepoint, AttachNone},
|
||||||
|
"xdp": {XDP, AttachNone},
|
||||||
|
"perf_event": {PerfEvent, AttachNone},
|
||||||
|
"lwt_in": {LWTIn, AttachNone},
|
||||||
|
"lwt_out": {LWTOut, AttachNone},
|
||||||
|
"lwt_xmit": {LWTXmit, AttachNone},
|
||||||
|
"lwt_seg6local": {LWTSeg6Local, AttachNone},
|
||||||
|
"sockops": {SockOps, AttachCGroupSockOps},
|
||||||
|
"sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser},
|
||||||
|
"sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser},
|
||||||
|
"sk_msg": {SkMsg, AttachSkSKBStreamVerdict},
|
||||||
|
"lirc_mode2": {LircMode2, AttachLircMode2},
|
||||||
|
"flow_dissector": {FlowDissector, AttachFlowDissector},
|
||||||
|
"iter/": {Tracing, AttachTraceIter},
|
||||||
|
"sk_lookup/": {SkLookup, AttachSkLookup},
|
||||||
|
"lsm/": {LSM, AttachLSMMac},
|
||||||
|
|
||||||
|
"cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress},
|
||||||
|
"cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress},
|
||||||
|
"cgroup/dev": {CGroupDevice, AttachCGroupDevice},
|
||||||
|
"cgroup/skb": {CGroupSKB, AttachNone},
|
||||||
|
"cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate},
|
||||||
|
"cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind},
|
||||||
|
"cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind},
|
||||||
|
"cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind},
|
||||||
|
"cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind},
|
||||||
|
"cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect},
|
||||||
|
"cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect},
|
||||||
|
"cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg},
|
||||||
|
"cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg},
|
||||||
|
"cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg},
|
||||||
|
"cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg},
|
||||||
|
"cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl},
|
||||||
|
"cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt},
|
||||||
|
"cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt},
|
||||||
|
"classifier": {SchedCLS, AttachNone},
|
||||||
|
"action": {SchedACT, AttachNone},
|
||||||
|
}
|
||||||
|
|
||||||
|
for prefix, t := range types {
|
||||||
|
if !strings.HasPrefix(sectionName, prefix) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.HasSuffix(prefix, "/") {
|
||||||
|
return t.progType, t.attachType, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.progType, t.attachType, sectionName[len(prefix):]
|
||||||
|
}
|
||||||
|
|
||||||
|
return UnspecifiedProgram, AttachNone, ""
|
||||||
|
}
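
// Illustration, not part of the vendored source: getProgType matches the ELF
// section name against the prefix table above. For prefixes ending in "/" the
// remainder of the section name is returned as the attach point. The section
// name used here is an assumption for the example only.
func exampleGetProgType() {
	progType, attachType, name := getProgType("kprobe/sys_execve")
	_, _, _ = progType, attachType, name
	// progType == Kprobe, attachType == AttachNone, name == "sys_execve"
}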
|
||||||
|
|
||||||
|
func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
|
||||||
|
rels := make(map[uint64]elf.Symbol)
|
||||||
|
|
||||||
|
if sec.Entsize < 16 {
|
||||||
|
return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := bufio.NewReader(sec.Open())
|
||||||
|
for off := uint64(0); off < sec.Size; off += sec.Entsize {
|
||||||
|
ent := io.LimitReader(r, int64(sec.Entsize))
|
||||||
|
|
||||||
|
var rel elf.Rel64
|
||||||
|
if binary.Read(ent, ec.ByteOrder, &rel) != nil {
|
||||||
|
return nil, fmt.Errorf("can't parse relocation at offset %v", off)
|
||||||
|
}
|
||||||
|
|
||||||
|
symNo := int(elf.R_SYM64(rel.Info) - 1)
|
||||||
|
if symNo >= len(symbols) {
|
||||||
|
return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo)
|
||||||
|
}
|
||||||
|
|
||||||
|
symbol := symbols[symNo]
|
||||||
|
rels[rel.Off] = symbol
|
||||||
|
}
|
||||||
|
|
||||||
|
return rels, nil
|
||||||
|
}
|
|
@@ -0,0 +1,21 @@

// +build gofuzz

// Use with https://github.com/dvyukov/go-fuzz

package ebpf

import "bytes"

func FuzzLoadCollectionSpec(data []byte) int {
	spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data))
	if err != nil {
		if spec != nil {
			panic("spec is not nil")
		}
		return 0
	}
	if spec == nil {
		panic("spec is nil")
	}
	return 1
}
@@ -0,0 +1,239 @@
|
||||||
|
package ebpf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MapInfo describes a map.
|
||||||
|
type MapInfo struct {
|
||||||
|
Type MapType
|
||||||
|
id MapID
|
||||||
|
KeySize uint32
|
||||||
|
ValueSize uint32
|
||||||
|
MaxEntries uint32
|
||||||
|
Flags uint32
|
||||||
|
// Name as supplied by user space at load time.
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) {
|
||||||
|
info, err := bpfGetMapInfoByFD(fd)
|
||||||
|
if errors.Is(err, syscall.EINVAL) {
|
||||||
|
return newMapInfoFromProc(fd)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &MapInfo{
|
||||||
|
MapType(info.map_type),
|
||||||
|
MapID(info.id),
|
||||||
|
info.key_size,
|
||||||
|
info.value_size,
|
||||||
|
info.max_entries,
|
||||||
|
info.map_flags,
|
||||||
|
// name is available from 4.15.
|
||||||
|
internal.CString(info.name[:]),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) {
|
||||||
|
var mi MapInfo
|
||||||
|
err := scanFdInfo(fd, map[string]interface{}{
|
||||||
|
"map_type": &mi.Type,
|
||||||
|
"key_size": &mi.KeySize,
|
||||||
|
"value_size": &mi.ValueSize,
|
||||||
|
"max_entries": &mi.MaxEntries,
|
||||||
|
"map_flags": &mi.Flags,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the map ID.
|
||||||
|
//
|
||||||
|
// Available from 4.13.
|
||||||
|
//
|
||||||
|
// The bool return value indicates whether this optional field is available.
|
||||||
|
func (mi *MapInfo) ID() (MapID, bool) {
|
||||||
|
return mi.id, mi.id > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// programStats holds statistics of a program.
|
||||||
|
type programStats struct {
|
||||||
|
// Total accumulated runtime of the program in ns.
|
||||||
|
runtime time.Duration
|
||||||
|
// Total number of times the program was called.
|
||||||
|
runCount uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProgramInfo describes a program.
|
||||||
|
type ProgramInfo struct {
|
||||||
|
Type ProgramType
|
||||||
|
id ProgramID
|
||||||
|
// Truncated hash of the BPF bytecode.
|
||||||
|
Tag string
|
||||||
|
// Name as supplied by user space at load time.
|
||||||
|
Name string
|
||||||
|
|
||||||
|
stats *programStats
|
||||||
|
}
|
||||||
|
|
||||||
|
func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) {
|
||||||
|
info, err := bpfGetProgInfoByFD(fd)
|
||||||
|
if errors.Is(err, syscall.EINVAL) {
|
||||||
|
return newProgramInfoFromProc(fd)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ProgramInfo{
|
||||||
|
Type: ProgramType(info.prog_type),
|
||||||
|
id: ProgramID(info.id),
|
||||||
|
// tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD.
|
||||||
|
Tag: hex.EncodeToString(info.tag[:]),
|
||||||
|
// name is available from 4.15.
|
||||||
|
Name: internal.CString(info.name[:]),
|
||||||
|
stats: &programStats{
|
||||||
|
runtime: time.Duration(info.run_time_ns),
|
||||||
|
runCount: info.run_cnt,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) {
|
||||||
|
var info ProgramInfo
|
||||||
|
err := scanFdInfo(fd, map[string]interface{}{
|
||||||
|
"prog_type": &info.Type,
|
||||||
|
"prog_tag": &info.Tag,
|
||||||
|
})
|
||||||
|
if errors.Is(err, errMissingFields) {
|
||||||
|
return nil, &internal.UnsupportedFeatureError{
|
||||||
|
Name: "reading program info from /proc/self/fdinfo",
|
||||||
|
MinimumVersion: internal.Version{4, 10, 0},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the program ID.
|
||||||
|
//
|
||||||
|
// Available from 4.13.
|
||||||
|
//
|
||||||
|
// The bool return value indicates whether this optional field is available.
|
||||||
|
func (pi *ProgramInfo) ID() (ProgramID, bool) {
|
||||||
|
return pi.id, pi.id > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunCount returns the total number of times the program was called.
|
||||||
|
//
|
||||||
|
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
|
||||||
|
// The bool return value indicates whether this optional field is available.
|
||||||
|
func (pi *ProgramInfo) RunCount() (uint64, bool) {
|
||||||
|
if pi.stats != nil {
|
||||||
|
return pi.stats.runCount, true
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Runtime returns the total accumulated runtime of the program.
|
||||||
|
//
|
||||||
|
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
|
||||||
|
// The bool return value indicates whether this optional field is available.
|
||||||
|
func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
|
||||||
|
if pi.stats != nil {
|
||||||
|
return pi.stats.runtime, true
|
||||||
|
}
|
||||||
|
return time.Duration(0), false
|
||||||
|
}
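
// Hypothetical usage sketch, not part of the vendored source: reading the
// optional statistics off a *ProgramInfo. The bool results only say whether
// the kernel exposed the fields at all; the values stay at zero unless
// statistics collection has been switched on via EnableStats.
func exampleProgramStats(info *ProgramInfo) {
	if count, ok := info.RunCount(); ok {
		fmt.Printf("run count: %d\n", count)
	}
	if total, ok := info.Runtime(); ok {
		fmt.Printf("accumulated runtime: %v\n", total)
	}
}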
|
||||||
|
|
||||||
|
func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
|
||||||
|
raw, err := fd.Value()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
if err := scanFdInfoReader(fh, fields); err != nil {
|
||||||
|
return fmt.Errorf("%s: %w", fh.Name(), err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errMissingFields = errors.New("missing fields")
|
||||||
|
|
||||||
|
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
|
||||||
|
var (
|
||||||
|
scanner = bufio.NewScanner(r)
|
||||||
|
scanned int
|
||||||
|
)
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
parts := strings.SplitN(scanner.Text(), "\t", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name := strings.TrimSuffix(parts[0], ":")
|
||||||
|
field, ok := fields[string(name)]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 {
|
||||||
|
return fmt.Errorf("can't parse field %s: %v", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
scanned++
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if scanned != len(fields) {
|
||||||
|
return errMissingFields
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
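
// Hypothetical usage sketch, not part of the vendored source: parsing the
// tab-separated "name:<TAB>value" lines exposed by /proc/<pid>/fdinfo for BPF
// file descriptors. The literal fdinfo content below is an assumption for the
// example only.
func exampleScanFdInfoReader() error {
	fdinfo := "pos:\t0\nprog_type:\t5\nprog_id:\t42\n"

	var progType, progID uint32
	return scanFdInfoReader(strings.NewReader(fdinfo), map[string]interface{}{
		"prog_type": &progType,
		"prog_id":   &progID,
	})
}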
|
||||||
|
|
||||||
|
// EnableStats starts measuring the runtime
// and run counts of eBPF programs.
//
// Collecting statistics can have an impact on performance.
//
// Requires at least kernel 5.8.
|
||||||
|
func EnableStats(which uint32) (io.Closer, error) {
|
||||||
|
attr := internal.BPFEnableStatsAttr{
|
||||||
|
StatsType: which,
|
||||||
|
}
|
||||||
|
|
||||||
|
fd, err := internal.BPFEnableStats(&attr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fd, nil
|
||||||
|
}
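
// Hypothetical usage sketch, not part of the vendored source: turning on
// run-time statistics collection for all BPF programs. The constant mirrors
// BPF_STATS_RUN_TIME (0) from the kernel's enum bpf_stats_type; it is spelled
// out here as an assumption rather than taken from a package constant. The
// returned io.Closer must stay open for as long as statistics are wanted.
func exampleEnableStats() error {
	const bpfStatsRunTime = 0 // BPF_STATS_RUN_TIME

	closer, err := EnableStats(bpfStatsRunTime)
	if err != nil {
		return err
	}
	defer closer.Close()

	// Inspect ProgramInfo.RunCount/Runtime while the closer is held open.
	return nil
}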
|
|
@@ -0,0 +1,791 @@
|
||||||
|
package btf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"debug/elf"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const btfMagic = 0xeB9F
|
||||||
|
|
||||||
|
// Errors returned by BTF functions.
|
||||||
|
var (
|
||||||
|
ErrNotSupported = internal.ErrNotSupported
|
||||||
|
ErrNotFound = errors.New("not found")
|
||||||
|
ErrNoExtendedInfo = errors.New("no extended info")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Spec represents decoded BTF.
|
||||||
|
type Spec struct {
|
||||||
|
rawTypes []rawType
|
||||||
|
strings stringTable
|
||||||
|
types []Type
|
||||||
|
namedTypes map[string][]namedType
|
||||||
|
funcInfos map[string]extInfo
|
||||||
|
lineInfos map[string]extInfo
|
||||||
|
coreRelos map[string]bpfCoreRelos
|
||||||
|
byteOrder binary.ByteOrder
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfHeader struct {
|
||||||
|
Magic uint16
|
||||||
|
Version uint8
|
||||||
|
Flags uint8
|
||||||
|
HdrLen uint32
|
||||||
|
|
||||||
|
TypeOff uint32
|
||||||
|
TypeLen uint32
|
||||||
|
StringOff uint32
|
||||||
|
StringLen uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadSpecFromReader reads BTF sections from an ELF.
|
||||||
|
//
|
||||||
|
// Returns a nil Spec and no error if no BTF was present.
|
||||||
|
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
|
||||||
|
file, err := internal.NewSafeELFFile(rd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
btfSection, btfExtSection, sectionSizes, err := findBtfSections(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if btfSection == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
symbols, err := file.Symbols()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read symbols: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
variableOffsets := make(map[variable]uint32)
|
||||||
|
for _, symbol := range symbols {
|
||||||
|
if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
|
||||||
|
// Ignore things like SHN_ABS
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if int(symbol.Section) >= len(file.Sections) {
|
||||||
|
return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
|
||||||
|
}
|
||||||
|
|
||||||
|
secName := file.Sections[symbol.Section].Name
|
||||||
|
if _, ok := sectionSizes[secName]; !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if symbol.Value > math.MaxUint32 {
|
||||||
|
return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if btfExtSection == nil {
|
||||||
|
return spec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read ext info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return spec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) {
|
||||||
|
var (
|
||||||
|
btfSection *elf.Section
|
||||||
|
btfExtSection *elf.Section
|
||||||
|
sectionSizes = make(map[string]uint32)
|
||||||
|
)
|
||||||
|
|
||||||
|
for _, sec := range file.Sections {
|
||||||
|
switch sec.Name {
|
||||||
|
case ".BTF":
|
||||||
|
btfSection = sec
|
||||||
|
case ".BTF.ext":
|
||||||
|
btfExtSection = sec
|
||||||
|
default:
|
||||||
|
if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if sec.Size > math.MaxUint32 {
|
||||||
|
return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
sectionSizes[sec.Name] = uint32(sec.Size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return btfSection, btfExtSection, sectionSizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) {
|
||||||
|
file, err := internal.NewSafeELFFile(rd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
btfSection, _, _, err := findBtfSections(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(".BTF ELF section: %s", err)
|
||||||
|
}
|
||||||
|
if btfSection == nil {
|
||||||
|
return nil, fmt.Errorf("unable to find .BTF ELF section")
|
||||||
|
}
|
||||||
|
return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
|
||||||
|
rawTypes, rawStrings, err := parseBTF(btf, bo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Spec{
|
||||||
|
rawTypes: rawTypes,
|
||||||
|
namedTypes: typesByName,
|
||||||
|
types: types,
|
||||||
|
strings: rawStrings,
|
||||||
|
byteOrder: bo,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var kernelBTF struct {
|
||||||
|
sync.Mutex
|
||||||
|
*Spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadKernelSpec returns the current kernel's BTF information.
|
||||||
|
//
|
||||||
|
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
|
||||||
|
// ErrNotSupported if BTF is not enabled.
|
||||||
|
func LoadKernelSpec() (*Spec, error) {
|
||||||
|
kernelBTF.Lock()
|
||||||
|
defer kernelBTF.Unlock()
|
||||||
|
|
||||||
|
if kernelBTF.Spec != nil {
|
||||||
|
return kernelBTF.Spec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
kernelBTF.Spec, err = loadKernelSpec()
|
||||||
|
return kernelBTF.Spec, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadKernelSpec() (*Spec, error) {
|
||||||
|
release, err := unix.KernelRelease()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read kernel release number: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fh, err := os.Open("/sys/kernel/btf/vmlinux")
|
||||||
|
if err == nil {
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// use same list of locations as libbpf
|
||||||
|
// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
|
||||||
|
locations := []string{
|
||||||
|
"/boot/vmlinux-%s",
|
||||||
|
"/lib/modules/%s/vmlinux-%[1]s",
|
||||||
|
"/lib/modules/%s/build/vmlinux",
|
||||||
|
"/usr/lib/modules/%s/kernel/vmlinux",
|
||||||
|
"/usr/lib/debug/boot/vmlinux-%s",
|
||||||
|
"/usr/lib/debug/boot/vmlinux-%s.debug",
|
||||||
|
"/usr/lib/debug/lib/modules/%s/vmlinux",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, loc := range locations {
|
||||||
|
path := fmt.Sprintf(loc, release)
|
||||||
|
|
||||||
|
fh, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
return loadSpecFromVmlinux(fh)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
|
||||||
|
rawBTF, err := ioutil.ReadAll(btf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read BTF: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rd := bytes.NewReader(rawBTF)
|
||||||
|
|
||||||
|
var header btfHeader
|
||||||
|
if err := binary.Read(rd, bo, &header); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read header: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Magic != btfMagic {
|
||||||
|
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Version != 1 {
|
||||||
|
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Flags != 0 {
|
||||||
|
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
||||||
|
if remainder < 0 {
|
||||||
|
return nil, nil, errors.New("header is too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("header padding: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read type names: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read types: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rawTypes, rawStrings, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type variable struct {
|
||||||
|
section string
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
|
||||||
|
for i, rawType := range rawTypes {
|
||||||
|
if rawType.Kind() != kindDatasec {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name, err := rawStrings.Lookup(rawType.NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if name == ".kconfig" || name == ".ksyms" {
|
||||||
|
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rawTypes[i].SizeType != 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
size, ok := sectionSizes[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("data section %s: missing size", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawTypes[i].SizeType = size
|
||||||
|
|
||||||
|
secinfos := rawType.data.([]btfVarSecinfo)
|
||||||
|
for j, secInfo := range secinfos {
|
||||||
|
id := int(secInfo.Type - 1)
|
||||||
|
if id >= len(rawTypes) {
|
||||||
|
return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
|
||||||
|
}
|
||||||
|
|
||||||
|
varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
offset, ok := variableOffsets[variable{name, varName}]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
|
||||||
|
}
|
||||||
|
|
||||||
|
secinfos[j].Offset = offset
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type marshalOpts struct {
|
||||||
|
ByteOrder binary.ByteOrder
|
||||||
|
StripFuncLinkage bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
|
||||||
|
var (
|
||||||
|
buf bytes.Buffer
|
||||||
|
header = new(btfHeader)
|
||||||
|
headerLen = binary.Size(header)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reserve space for the header. We have to write it last since
|
||||||
|
// we don't know the size of the type section yet.
|
||||||
|
_, _ = buf.Write(make([]byte, headerLen))
|
||||||
|
|
||||||
|
// Write type section, just after the header.
|
||||||
|
for _, raw := range s.rawTypes {
|
||||||
|
switch {
|
||||||
|
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
|
||||||
|
raw.SetLinkage(linkageStatic)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
|
||||||
|
return nil, fmt.Errorf("can't marshal BTF: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
typeLen := uint32(buf.Len() - headerLen)
|
||||||
|
|
||||||
|
// Write string section after type section.
|
||||||
|
_, _ = buf.Write(s.strings)
|
||||||
|
|
||||||
|
// Fill out the header, and write it out.
|
||||||
|
header = &btfHeader{
|
||||||
|
Magic: btfMagic,
|
||||||
|
Version: 1,
|
||||||
|
Flags: 0,
|
||||||
|
HdrLen: uint32(headerLen),
|
||||||
|
TypeOff: 0,
|
||||||
|
TypeLen: typeLen,
|
||||||
|
StringOff: typeLen,
|
||||||
|
StringLen: uint32(len(s.strings)),
|
||||||
|
}
|
||||||
|
|
||||||
|
raw := buf.Bytes()
|
||||||
|
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't write header: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return raw, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type sliceWriter []byte
|
||||||
|
|
||||||
|
func (sw sliceWriter) Write(p []byte) (int, error) {
|
||||||
|
if len(p) != len(sw) {
|
||||||
|
return 0, errors.New("size doesn't match")
|
||||||
|
}
|
||||||
|
|
||||||
|
return copy(sw, p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Program finds the BTF for a specific section.
|
||||||
|
//
|
||||||
|
// Length is the number of bytes in the raw BPF instruction stream.
|
||||||
|
//
|
||||||
|
// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
|
||||||
|
// contain extended BTF info.
|
||||||
|
func (s *Spec) Program(name string, length uint64) (*Program, error) {
|
||||||
|
if length == 0 {
|
||||||
|
return nil, errors.New("length musn't be zero")
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
|
||||||
|
return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
funcInfos, funcOK := s.funcInfos[name]
|
||||||
|
lineInfos, lineOK := s.lineInfos[name]
|
||||||
|
coreRelos, coreOK := s.coreRelos[name]
|
||||||
|
|
||||||
|
if !funcOK && !lineOK && !coreOK {
|
||||||
|
return nil, fmt.Errorf("no extended BTF info for section %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Datasec returns the BTF required to create maps which represent data sections.
|
||||||
|
func (s *Spec) Datasec(name string) (*Map, error) {
|
||||||
|
var datasec Datasec
|
||||||
|
if err := s.FindType(name, &datasec); err != nil {
|
||||||
|
return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
m := NewMap(s, &Void{}, &datasec)
|
||||||
|
return &m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindType searches for a type with a specific name.
|
||||||
|
//
|
||||||
|
// hint determines the type of the returned Type.
|
||||||
|
//
|
||||||
|
// Returns an error wrapping ErrNotFound if no matching
|
||||||
|
// type exists in spec.
|
||||||
|
func (s *Spec) FindType(name string, typ Type) error {
|
||||||
|
var (
|
||||||
|
wanted = reflect.TypeOf(typ)
|
||||||
|
candidate Type
|
||||||
|
)
|
||||||
|
|
||||||
|
for _, typ := range s.namedTypes[essentialName(name)] {
|
||||||
|
if reflect.TypeOf(typ) != wanted {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match against the full name, not just the essential one.
|
||||||
|
if typ.name() != name {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if candidate != nil {
|
||||||
|
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
candidate = typ
|
||||||
|
}
|
||||||
|
|
||||||
|
if candidate == nil {
|
||||||
|
return fmt.Errorf("type %s: %w", name, ErrNotFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
|
||||||
|
reflect.Indirect(reflect.ValueOf(typ)).Set(value)
|
||||||
|
return nil
|
||||||
|
}
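
// Hypothetical usage sketch, not part of the vendored source: combining
// LoadKernelSpec and FindType to look up a kernel struct by name. The type
// name "task_struct" is only an illustration; on kernels without
// CONFIG_DEBUG_INFO_BTF this returns an error wrapping ErrNotSupported.
func exampleFindKernelStruct() error {
	spec, err := LoadKernelSpec()
	if err != nil {
		return err
	}

	var task Struct
	if err := spec.FindType("task_struct", &task); err != nil {
		return err
	}

	// task now holds a copy of the kernel's task_struct BTF.
	return nil
}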
|
||||||
|
|
||||||
|
// Handle is a reference to BTF loaded into the kernel.
|
||||||
|
type Handle struct {
|
||||||
|
fd *internal.FD
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHandle loads BTF into the kernel.
|
||||||
|
//
|
||||||
|
// Returns ErrNotSupported if BTF is not supported.
|
||||||
|
func NewHandle(spec *Spec) (*Handle, error) {
|
||||||
|
if err := haveBTF(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if spec.byteOrder != internal.NativeEndian {
|
||||||
|
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
|
||||||
|
}
|
||||||
|
|
||||||
|
btf, err := spec.marshal(marshalOpts{
|
||||||
|
ByteOrder: internal.NativeEndian,
|
||||||
|
StripFuncLinkage: haveFuncLinkage() != nil,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't marshal BTF: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint64(len(btf)) > math.MaxUint32 {
|
||||||
|
return nil, errors.New("BTF exceeds the maximum size")
|
||||||
|
}
|
||||||
|
|
||||||
|
attr := &bpfLoadBTFAttr{
|
||||||
|
btf: internal.NewSlicePointer(btf),
|
||||||
|
btfSize: uint32(len(btf)),
|
||||||
|
}
|
||||||
|
|
||||||
|
fd, err := bpfLoadBTF(attr)
|
||||||
|
if err != nil {
|
||||||
|
logBuf := make([]byte, 64*1024)
|
||||||
|
attr.logBuf = internal.NewSlicePointer(logBuf)
|
||||||
|
attr.btfLogSize = uint32(len(logBuf))
|
||||||
|
attr.btfLogLevel = 1
|
||||||
|
_, logErr := bpfLoadBTF(attr)
|
||||||
|
return nil, internal.ErrorWithLog(err, logBuf, logErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Handle{fd}, nil
|
||||||
|
}
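
// Hypothetical usage sketch, not part of the vendored source: loading a Spec
// into the kernel and releasing it again. FD (defined below) hands out the
// raw descriptor for callers that need to reference the loaded BTF when
// creating maps or programs.
func exampleNewHandle(spec *Spec) error {
	handle, err := NewHandle(spec)
	if err != nil {
		return err // ErrNotSupported on kernels without BTF
	}
	defer handle.Close()

	_ = handle.FD()
	return nil
}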
|
||||||
|
|
||||||
|
// Close destroys the handle.
|
||||||
|
//
|
||||||
|
// Subsequent calls to FD will return an invalid value.
|
||||||
|
func (h *Handle) Close() error {
|
||||||
|
return h.fd.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FD returns the file descriptor for the handle.
|
||||||
|
func (h *Handle) FD() int {
|
||||||
|
value, err := h.fd.Value()
|
||||||
|
if err != nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return int(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map is the BTF for a map.
|
||||||
|
type Map struct {
|
||||||
|
spec *Spec
|
||||||
|
key, value Type
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMap returns a new Map containing the given values.
|
||||||
|
// The key and value arguments are initialized to Void if nil values are given.
|
||||||
|
func NewMap(spec *Spec, key Type, value Type) Map {
|
||||||
|
if key == nil {
|
||||||
|
key = &Void{}
|
||||||
|
}
|
||||||
|
if value == nil {
|
||||||
|
value = &Void{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Map{
|
||||||
|
spec: spec,
|
||||||
|
key: key,
|
||||||
|
value: value,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapSpec should be a method on Map, but is a free function
|
||||||
|
// to hide it from users of the ebpf package.
|
||||||
|
func MapSpec(m *Map) *Spec {
|
||||||
|
return m.spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapKey should be a method on Map, but is a free function
|
||||||
|
// to hide it from users of the ebpf package.
|
||||||
|
func MapKey(m *Map) Type {
|
||||||
|
return m.key
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapValue should be a method on Map, but is a free function
|
||||||
|
// to hide it from users of the ebpf package.
|
||||||
|
func MapValue(m *Map) Type {
|
||||||
|
return m.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Program is the BTF information for a stream of instructions.
|
||||||
|
type Program struct {
|
||||||
|
spec *Spec
|
||||||
|
length uint64
|
||||||
|
funcInfos, lineInfos extInfo
|
||||||
|
coreRelos bpfCoreRelos
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
|
||||||
|
//
|
||||||
|
// This is a free function instead of a method to hide it from users
|
||||||
|
// of package ebpf.
|
||||||
|
func ProgramSpec(s *Program) *Spec {
|
||||||
|
return s.spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProgramAppend appends the information from other to the Program.
|
||||||
|
//
|
||||||
|
// This is a free function instead of a method to hide it from users
|
||||||
|
// of package ebpf.
|
||||||
|
func ProgramAppend(s, other *Program) error {
|
||||||
|
funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("func infos: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("line infos: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.funcInfos = funcInfos
|
||||||
|
s.lineInfos = lineInfos
|
||||||
|
s.coreRelos = s.coreRelos.append(other.coreRelos, s.length)
|
||||||
|
s.length += other.length
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProgramFuncInfos returns the binary form of BTF function infos.
|
||||||
|
//
|
||||||
|
// This is a free function instead of a method to hide it from users
|
||||||
|
// of package ebpf.
|
||||||
|
func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
|
||||||
|
bytes, err = s.funcInfos.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.funcInfos.recordSize, bytes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProgramLineInfos returns the binary form of BTF line infos.
|
||||||
|
//
|
||||||
|
// This is a free function instead of a method to hide it from users
|
||||||
|
// of package ebpf.
|
||||||
|
func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
|
||||||
|
bytes, err = s.lineInfos.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.lineInfos.recordSize, bytes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProgramRelocations returns the CO-RE relocations required to adjust the
|
||||||
|
// program to the target.
|
||||||
|
//
|
||||||
|
// This is a free function instead of a method to hide it from users
|
||||||
|
// of package ebpf.
|
||||||
|
func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) {
|
||||||
|
if len(s.coreRelos) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return coreRelocate(s.spec, target, s.coreRelos)
|
||||||
|
}
|
||||||
|
|
||||||
|
type bpfLoadBTFAttr struct {
|
||||||
|
btf internal.Pointer
|
||||||
|
logBuf internal.Pointer
|
||||||
|
btfSize uint32
|
||||||
|
btfLogSize uint32
|
||||||
|
btfLogLevel uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
|
||||||
|
fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return internal.NewFD(uint32(fd)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
|
||||||
|
const minHeaderLength = 24
|
||||||
|
|
||||||
|
typesLen := uint32(binary.Size(types))
|
||||||
|
header := btfHeader{
|
||||||
|
Magic: btfMagic,
|
||||||
|
Version: 1,
|
||||||
|
HdrLen: minHeaderLength,
|
||||||
|
TypeOff: 0,
|
||||||
|
TypeLen: typesLen,
|
||||||
|
StringOff: typesLen,
|
||||||
|
StringLen: uint32(len(strings)),
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
_ = binary.Write(buf, bo, &header)
|
||||||
|
_ = binary.Write(buf, bo, types)
|
||||||
|
buf.Write(strings)
|
||||||
|
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
|
||||||
|
var (
|
||||||
|
types struct {
|
||||||
|
Integer btfType
|
||||||
|
Var btfType
|
||||||
|
btfVar struct{ Linkage uint32 }
|
||||||
|
}
|
||||||
|
strings = []byte{0, 'a', 0}
|
||||||
|
)
|
||||||
|
|
||||||
|
// We use a BTF_KIND_VAR here, to make sure that
|
||||||
|
// the kernel understands BTF at least as well as we
|
||||||
|
// do. BTF_KIND_VAR was introduced ~5.1.
|
||||||
|
types.Integer.SetKind(kindPointer)
|
||||||
|
types.Var.NameOff = 1
|
||||||
|
types.Var.SetKind(kindVar)
|
||||||
|
types.Var.SizeType = 1
|
||||||
|
|
||||||
|
btf := marshalBTF(&types, strings, internal.NativeEndian)
|
||||||
|
|
||||||
|
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
|
||||||
|
btf: internal.NewSlicePointer(btf),
|
||||||
|
btfSize: uint32(len(btf)),
|
||||||
|
})
|
||||||
|
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
|
||||||
|
// Treat both EINVAL and EPERM as not supported: loading the program
|
||||||
|
// might still succeed without BTF.
|
||||||
|
return internal.ErrNotSupported
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fd.Close()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
|
||||||
|
if err := haveBTF(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
types struct {
|
||||||
|
FuncProto btfType
|
||||||
|
Func btfType
|
||||||
|
}
|
||||||
|
strings = []byte{0, 'a', 0}
|
||||||
|
)
|
||||||
|
|
||||||
|
types.FuncProto.SetKind(kindFuncProto)
|
||||||
|
types.Func.SetKind(kindFunc)
|
||||||
|
types.Func.SizeType = 1 // aka FuncProto
|
||||||
|
types.Func.NameOff = 1
|
||||||
|
types.Func.SetLinkage(linkageGlobal)
|
||||||
|
|
||||||
|
btf := marshalBTF(&types, strings, internal.NativeEndian)
|
||||||
|
|
||||||
|
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
|
||||||
|
btf: internal.NewSlicePointer(btf),
|
||||||
|
btfSize: uint32(len(btf)),
|
||||||
|
})
|
||||||
|
if errors.Is(err, unix.EINVAL) {
|
||||||
|
return internal.ErrNotSupported
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fd.Close()
|
||||||
|
return nil
|
||||||
|
})
|
|
@@ -0,0 +1,269 @@
|
||||||
|
package btf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// btfKind describes a Type.
|
||||||
|
type btfKind uint8
|
||||||
|
|
||||||
|
// Equivalents of the BTF_KIND_* constants.
|
||||||
|
const (
|
||||||
|
kindUnknown btfKind = iota
|
||||||
|
kindInt
|
||||||
|
kindPointer
|
||||||
|
kindArray
|
||||||
|
kindStruct
|
||||||
|
kindUnion
|
||||||
|
kindEnum
|
||||||
|
kindForward
|
||||||
|
kindTypedef
|
||||||
|
kindVolatile
|
||||||
|
kindConst
|
||||||
|
kindRestrict
|
||||||
|
// Added ~4.20
|
||||||
|
kindFunc
|
||||||
|
kindFuncProto
|
||||||
|
// Added ~5.1
|
||||||
|
kindVar
|
||||||
|
kindDatasec
|
||||||
|
)
|
||||||
|
|
||||||
|
type btfFuncLinkage uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
linkageStatic btfFuncLinkage = iota
|
||||||
|
linkageGlobal
|
||||||
|
linkageExtern
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
btfTypeKindShift = 24
|
||||||
|
btfTypeKindLen = 4
|
||||||
|
btfTypeVlenShift = 0
|
||||||
|
btfTypeVlenMask = 16
|
||||||
|
btfTypeKindFlagShift = 31
|
||||||
|
btfTypeKindFlagMask = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
|
||||||
|
type btfType struct {
|
||||||
|
NameOff uint32
|
||||||
|
/* "info" bits arrangement
|
||||||
|
* bits 0-15: vlen (e.g. # of struct's members), linkage
|
||||||
|
* bits 16-23: unused
|
||||||
|
* bits 24-27: kind (e.g. int, ptr, array...etc)
|
||||||
|
* bits 28-30: unused
|
||||||
|
* bit 31: kind_flag, currently used by
|
||||||
|
* struct, union and fwd
|
||||||
|
*/
|
||||||
|
Info uint32
|
||||||
|
/* "size" is used by INT, ENUM, STRUCT and UNION.
|
||||||
|
* "size" tells the size of the type it is describing.
|
||||||
|
*
|
||||||
|
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||||
|
* FUNC and FUNC_PROTO.
|
||||||
|
* "type" is a type_id referring to another type.
|
||||||
|
*/
|
||||||
|
SizeType uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k btfKind) String() string {
|
||||||
|
switch k {
|
||||||
|
case kindUnknown:
|
||||||
|
return "Unknown"
|
||||||
|
case kindInt:
|
||||||
|
return "Integer"
|
||||||
|
case kindPointer:
|
||||||
|
return "Pointer"
|
||||||
|
case kindArray:
|
||||||
|
return "Array"
|
||||||
|
case kindStruct:
|
||||||
|
return "Struct"
|
||||||
|
case kindUnion:
|
||||||
|
return "Union"
|
||||||
|
case kindEnum:
|
||||||
|
return "Enumeration"
|
||||||
|
case kindForward:
|
||||||
|
return "Forward"
|
||||||
|
case kindTypedef:
|
||||||
|
return "Typedef"
|
||||||
|
case kindVolatile:
|
||||||
|
return "Volatile"
|
||||||
|
case kindConst:
|
||||||
|
return "Const"
|
||||||
|
case kindRestrict:
|
||||||
|
return "Restrict"
|
||||||
|
case kindFunc:
|
||||||
|
return "Function"
|
||||||
|
case kindFuncProto:
|
||||||
|
return "Function Proto"
|
||||||
|
case kindVar:
|
||||||
|
return "Variable"
|
||||||
|
case kindDatasec:
|
||||||
|
return "Section"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("Unknown (%d)", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mask(len uint32) uint32 {
|
||||||
|
return (1 << len) - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) info(len, shift uint32) uint32 {
|
||||||
|
return (bt.Info >> shift) & mask(len)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) setInfo(value, len, shift uint32) {
|
||||||
|
bt.Info &^= mask(len) << shift
|
||||||
|
bt.Info |= (value & mask(len)) << shift
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) Kind() btfKind {
|
||||||
|
return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) SetKind(kind btfKind) {
|
||||||
|
bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) Vlen() int {
|
||||||
|
return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) SetVlen(vlen int) {
|
||||||
|
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) KindFlag() bool {
|
||||||
|
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) Linkage() btfFuncLinkage {
|
||||||
|
return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
|
||||||
|
bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) Type() TypeID {
|
||||||
|
// TODO: Panic here if wrong kind?
|
||||||
|
return TypeID(bt.SizeType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) Size() uint32 {
|
||||||
|
// TODO: Panic here if wrong kind?
|
||||||
|
return bt.SizeType
|
||||||
|
}
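
// Illustration, not part of the vendored source: how the accessors above pack
// and unpack the Info word following the bit layout documented on btfType.
func exampleBtfTypeInfo() {
	var bt btfType
	bt.SetKind(kindStruct) // bits 24-27
	bt.SetVlen(3)          // bits 0-15, here the number of members

	_ = bt.Kind() // kindStruct
	_ = bt.Vlen() // 3
}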
|
||||||
|
|
||||||
|
type rawType struct {
|
||||||
|
btfType
|
||||||
|
data interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
|
||||||
|
if err := binary.Write(w, bo, &rt.btfType); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if rt.data == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return binary.Write(w, bo, rt.data)
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfArray struct {
|
||||||
|
Type TypeID
|
||||||
|
IndexType TypeID
|
||||||
|
Nelems uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfMember struct {
|
||||||
|
NameOff uint32
|
||||||
|
Type TypeID
|
||||||
|
Offset uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfVarSecinfo struct {
|
||||||
|
Type TypeID
|
||||||
|
Offset uint32
|
||||||
|
Size uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfVariable struct {
|
||||||
|
Linkage uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfEnum struct {
|
||||||
|
NameOff uint32
|
||||||
|
Val int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfParam struct {
|
||||||
|
NameOff uint32
|
||||||
|
Type TypeID
|
||||||
|
}
|
||||||
|
|
||||||
|
func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
||||||
|
var (
|
||||||
|
header btfType
|
||||||
|
types []rawType
|
||||||
|
)
|
||||||
|
|
||||||
|
for id := TypeID(1); ; id++ {
|
||||||
|
if err := binary.Read(r, bo, &header); err == io.EOF {
|
||||||
|
return types, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var data interface{}
|
||||||
|
switch header.Kind() {
|
||||||
|
case kindInt:
|
||||||
|
data = new(uint32)
|
||||||
|
case kindPointer:
|
||||||
|
case kindArray:
|
||||||
|
data = new(btfArray)
|
||||||
|
case kindStruct:
|
||||||
|
fallthrough
|
||||||
|
case kindUnion:
|
||||||
|
data = make([]btfMember, header.Vlen())
|
||||||
|
case kindEnum:
|
||||||
|
data = make([]btfEnum, header.Vlen())
|
||||||
|
case kindForward:
|
||||||
|
case kindTypedef:
|
||||||
|
case kindVolatile:
|
||||||
|
case kindConst:
|
||||||
|
case kindRestrict:
|
||||||
|
case kindFunc:
|
||||||
|
case kindFuncProto:
|
||||||
|
data = make([]btfParam, header.Vlen())
|
||||||
|
case kindVar:
|
||||||
|
data = new(btfVariable)
|
||||||
|
case kindDatasec:
|
||||||
|
data = make([]btfVarSecinfo, header.Vlen())
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
|
||||||
|
}
|
||||||
|
|
||||||
|
if data == nil {
|
||||||
|
types = append(types, rawType{header, nil})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := binary.Read(r, bo, data); err != nil {
|
||||||
|
return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
types = append(types, rawType{header, data})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
|
||||||
|
return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
|
||||||
|
}
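
// Illustration, not part of the vendored source: decoding the extra uint32
// that follows a BTF_KIND_INT header. The raw value below is an assumption
// describing a plain 32-bit integer with no encoding flags and no offset.
func exampleIntEncoding() {
	raw := uint32(0x00000020)
	encoding, offset, bits := intEncoding(raw)
	_, _, _ = encoding, offset, bits
	// encoding == 0 (no SIGNED/CHAR/BOOL flag), offset == 0, bits == 32
}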
|
|
@@ -0,0 +1,388 @@
|
||||||
|
package btf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Code in this file is derived from libbpf, which is available under a BSD
|
||||||
|
// 2-Clause license.
|
||||||
|
|
||||||
|
// Relocation describes a CO-RE relocation.
|
||||||
|
type Relocation struct {
|
||||||
|
Current uint32
|
||||||
|
New uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r Relocation) equal(other Relocation) bool {
|
||||||
|
return r.Current == other.Current && r.New == other.New
|
||||||
|
}
|
||||||
|
|
||||||
|
// coreReloKind is the type of CO-RE relocation
|
||||||
|
type coreReloKind uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
reloFieldByteOffset coreReloKind = iota /* field byte offset */
|
||||||
|
reloFieldByteSize /* field size in bytes */
|
||||||
|
reloFieldExists /* field existence in target kernel */
|
||||||
|
reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
|
||||||
|
reloFieldLShiftU64 /* bitfield-specific left bitshift */
|
||||||
|
reloFieldRShiftU64 /* bitfield-specific right bitshift */
|
||||||
|
reloTypeIDLocal /* type ID in local BPF object */
|
||||||
|
reloTypeIDTarget /* type ID in target kernel */
|
||||||
|
reloTypeExists /* type existence in target kernel */
|
||||||
|
reloTypeSize /* type size in bytes */
|
||||||
|
reloEnumvalExists /* enum value existence in target kernel */
|
||||||
|
reloEnumvalValue /* enum value integer value */
|
||||||
|
)
|
||||||
|
|
||||||
|
func (k coreReloKind) String() string {
|
||||||
|
switch k {
|
||||||
|
case reloFieldByteOffset:
|
||||||
|
return "byte_off"
|
||||||
|
case reloFieldByteSize:
|
||||||
|
return "byte_sz"
|
||||||
|
case reloFieldExists:
|
||||||
|
return "field_exists"
|
||||||
|
case reloFieldSigned:
|
||||||
|
return "signed"
|
||||||
|
case reloFieldLShiftU64:
|
||||||
|
return "lshift_u64"
|
||||||
|
case reloFieldRShiftU64:
|
||||||
|
return "rshift_u64"
|
||||||
|
case reloTypeIDLocal:
|
||||||
|
return "local_type_id"
|
||||||
|
case reloTypeIDTarget:
|
||||||
|
return "target_type_id"
|
||||||
|
case reloTypeExists:
|
||||||
|
return "type_exists"
|
||||||
|
case reloTypeSize:
|
||||||
|
return "type_size"
|
||||||
|
case reloEnumvalExists:
|
||||||
|
return "enumval_exists"
|
||||||
|
case reloEnumvalValue:
|
||||||
|
return "enumval_value"
|
||||||
|
default:
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
|
||||||
|
if target == nil {
|
||||||
|
var err error
|
||||||
|
target, err = loadKernelSpec()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if local.byteOrder != target.byteOrder {
|
||||||
|
return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
|
||||||
|
}
|
||||||
|
|
||||||
|
relocations := make(map[uint64]Relocation, len(coreRelos))
|
||||||
|
for _, relo := range coreRelos {
|
||||||
|
accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
accessor, err := parseCoreAccessor(accessorStr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if int(relo.TypeID) >= len(local.types) {
|
||||||
|
return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
|
||||||
|
}
|
||||||
|
|
||||||
|
typ := local.types[relo.TypeID]
|
||||||
|
|
||||||
|
if relo.ReloKind == reloTypeIDLocal {
|
||||||
|
relocations[uint64(relo.InsnOff)] = Relocation{
|
||||||
|
uint32(typ.ID()),
|
||||||
|
uint32(typ.ID()),
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
named, ok := typ.(namedType)
|
||||||
|
if !ok || named.name() == "" {
|
||||||
|
return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
name := essentialName(named.name())
|
||||||
|
res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("relocate %s: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
relocations[uint64(relo.InsnOff)] = res
|
||||||
|
}
|
||||||
|
|
||||||
|
return relocations, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errAmbiguousRelocation = errors.New("ambiguous relocation")
|
||||||
|
|
||||||
|
func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
|
||||||
|
var relos []Relocation
|
||||||
|
var matches []Type
|
||||||
|
for _, target := range targets {
|
||||||
|
switch kind {
|
||||||
|
case reloTypeIDTarget:
|
||||||
|
if localAccessor[0] != 0 {
|
||||||
|
return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
if compat, err := coreAreTypesCompatible(local, target); err != nil {
|
||||||
|
return Relocation{}, fmt.Errorf("%s: %s", kind, err)
|
||||||
|
} else if !compat {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})
|
||||||
|
|
||||||
|
default:
|
||||||
|
return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
|
||||||
|
}
|
||||||
|
matches = append(matches, target)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(relos) == 0 {
|
||||||
|
// TODO: Add switch for existence checks like reloEnumvalExists here.
|
||||||
|
|
||||||
|
// TODO: This might have to be poisoned.
|
||||||
|
return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
|
||||||
|
}
|
||||||
|
|
||||||
|
relo := relos[0]
|
||||||
|
for _, altRelo := range relos[1:] {
|
||||||
|
if !altRelo.equal(relo) {
|
||||||
|
return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return relo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/* coreAccessor contains a path through a struct. It contains at least one index.
|
||||||
|
*
|
||||||
|
* The interpretation depends on the kind of the relocation. The following is
|
||||||
|
* taken from struct bpf_core_relo in libbpf_internal.h:
|
||||||
|
*
|
||||||
|
* - for field-based relocations, string encodes an accessed field using
|
||||||
|
* a sequence of field and array indices, separated by colon (:). It's
|
||||||
|
* conceptually very close to LLVM's getelementptr ([0]) instruction's
|
||||||
|
* arguments for identifying offset to a field.
|
||||||
|
* - for type-based relocations, strings is expected to be just "0";
|
||||||
|
* - for enum value-based relocations, string contains an index of enum
|
||||||
|
* value within its enum type;
|
||||||
|
*
|
||||||
|
* Example to provide a better feel.
|
||||||
|
*
|
||||||
|
* struct sample {
|
||||||
|
* int a;
|
||||||
|
* struct {
|
||||||
|
* int b[10];
|
||||||
|
* };
|
||||||
|
* };
|
||||||
|
*
|
||||||
|
* struct sample s = ...;
|
||||||
|
* int x = &s->a; // encoded as "0:0" (a is field #0)
|
||||||
|
* int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
|
||||||
|
* // b is field #0 inside anon struct, accessing elem #5)
|
||||||
|
* int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
|
||||||
|
*/
|
||||||
|
type coreAccessor []int
|
||||||
|
|
||||||
|
func parseCoreAccessor(accessor string) (coreAccessor, error) {
|
||||||
|
if accessor == "" {
|
||||||
|
return nil, fmt.Errorf("empty accessor")
|
||||||
|
}
|
||||||
|
|
||||||
|
var result coreAccessor
|
||||||
|
parts := strings.Split(accessor, ":")
|
||||||
|
for _, part := range parts {
|
||||||
|
// 31 bits to avoid overflowing int on 32 bit platforms.
|
||||||
|
index, err := strconv.ParseUint(part, 10, 31)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("accessor index %q: %s", part, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
result = append(result, int(index))
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
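
// Illustration, not part of the vendored source: parsing the accessor string
// from the struct sample walkthrough in the comment above. "0:1:0:5" selects
// field #0 of the root type, field #1 (the anonymous struct), field #0 (b)
// and array element #5.
func exampleParseCoreAccessor() {
	accessor, err := parseCoreAccessor("0:1:0:5")
	if err == nil {
		_ = accessor // coreAccessor{0, 1, 0, 5}
	}
}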
|
||||||
|
|
||||||
|
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
|
||||||
|
*
|
||||||
|
* Check local and target types for compatibility. This check is used for
|
||||||
|
* type-based CO-RE relocations and follow slightly different rules than
|
||||||
|
* field-based relocations. This function assumes that root types were already
|
||||||
|
* checked for name match. Beyond that initial root-level name check, names
|
||||||
|
* are completely ignored. Compatibility rules are as follows:
|
||||||
|
* - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
|
||||||
|
* kind should match for local and target types (i.e., STRUCT is not
|
||||||
|
* compatible with UNION);
|
||||||
|
* - for ENUMs, the size is ignored;
|
||||||
|
* - for INT, size and signedness are ignored;
|
||||||
|
* - for ARRAY, dimensionality is ignored, element types are checked for
|
||||||
|
* compatibility recursively;
|
||||||
|
* - CONST/VOLATILE/RESTRICT modifiers are ignored;
|
||||||
|
* - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
|
||||||
|
* - FUNC_PROTOs are compatible if they have compatible signature: same
|
||||||
|
* number of input args and compatible return and argument types.
|
||||||
|
* These rules are not set in stone and probably will be adjusted as we get
|
||||||
|
* more experience with using BPF CO-RE relocations.
|
||||||
|
*/
|
||||||
|
func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
|
||||||
|
var (
|
||||||
|
localTs, targetTs typeDeque
|
||||||
|
l, t = &localType, &targetType
|
||||||
|
depth = 0
|
||||||
|
)
|
||||||
|
|
||||||
|
for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
|
||||||
|
if depth >= maxTypeDepth {
|
||||||
|
return false, errors.New("types are nested too deep")
|
||||||
|
}
|
||||||
|
|
||||||
|
localType = skipQualifierAndTypedef(*l)
|
||||||
|
targetType = skipQualifierAndTypedef(*t)
|
||||||
|
|
||||||
|
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch lv := (localType).(type) {
|
||||||
|
case *Void, *Struct, *Union, *Enum, *Fwd:
|
||||||
|
// Nothing to do here
|
||||||
|
|
||||||
|
case *Int:
|
||||||
|
tv := targetType.(*Int)
|
||||||
|
if lv.isBitfield() || tv.isBitfield() {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case *Pointer, *Array:
|
||||||
|
depth++
|
||||||
|
localType.walk(&localTs)
|
||||||
|
targetType.walk(&targetTs)
|
||||||
|
|
||||||
|
case *FuncProto:
|
||||||
|
tv := targetType.(*FuncProto)
|
||||||
|
if len(lv.Params) != len(tv.Params) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
depth++
|
||||||
|
localType.walk(&localTs)
|
||||||
|
targetType.walk(&targetTs)
|
||||||
|
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("unsupported type %T", localType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if l != nil {
|
||||||
|
return false, fmt.Errorf("dangling local type %T", *l)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t != nil {
|
||||||
|
return false, fmt.Errorf("dangling target type %T", *t)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
|
||||||
|
*
|
||||||
|
* Check two types for compatibility for the purpose of field access
|
||||||
|
* relocation. const/volatile/restrict and typedefs are skipped to ensure we
|
||||||
|
* are relocating semantically compatible entities:
|
||||||
|
* - any two STRUCTs/UNIONs are compatible and can be mixed;
|
||||||
|
* - any two FWDs are compatible, if their names match (modulo flavor suffix);
|
||||||
|
* - any two PTRs are always compatible;
|
||||||
|
* - for ENUMs, names should be the same (ignoring flavor suffix) or at
|
||||||
|
* least one of enums should be anonymous;
|
||||||
|
* - for ENUMs, check sizes, names are ignored;
|
||||||
|
* - for INT, size and signedness are ignored;
|
||||||
|
* - for ARRAY, dimensionality is ignored, element types are checked for
|
||||||
|
* compatibility recursively;
|
||||||
|
* - everything else shouldn't be ever a target of relocation.
|
||||||
|
* These rules are not set in stone and probably will be adjusted as we get
|
||||||
|
* more experience with using BPF CO-RE relocations.
|
||||||
|
*/
|
||||||
|
func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
|
||||||
|
doNamesMatch := func(a, b string) bool {
|
||||||
|
if a == "" || b == "" {
|
||||||
|
// allow anonymous and named type to match
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return essentialName(a) == essentialName(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
for depth := 0; depth <= maxTypeDepth; depth++ {
|
||||||
|
localType = skipQualifierAndTypedef(localType)
|
||||||
|
targetType = skipQualifierAndTypedef(targetType)
|
||||||
|
|
||||||
|
_, lok := localType.(composite)
|
||||||
|
_, tok := targetType.(composite)
|
||||||
|
if lok && tok {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch lv := localType.(type) {
|
||||||
|
case *Pointer:
|
||||||
|
return true, nil
|
||||||
|
|
||||||
|
case *Enum:
|
||||||
|
tv := targetType.(*Enum)
|
||||||
|
return doNamesMatch(lv.name(), tv.name()), nil
|
||||||
|
|
||||||
|
case *Fwd:
|
||||||
|
tv := targetType.(*Fwd)
|
||||||
|
return doNamesMatch(lv.name(), tv.name()), nil
|
||||||
|
|
||||||
|
case *Int:
|
||||||
|
tv := targetType.(*Int)
|
||||||
|
return !lv.isBitfield() && !tv.isBitfield(), nil
|
||||||
|
|
||||||
|
case *Array:
|
||||||
|
tv := targetType.(*Array)
|
||||||
|
|
||||||
|
localType = lv.Type
|
||||||
|
targetType = tv.Type
|
||||||
|
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("unsupported type %T", localType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, errors.New("types are nested too deep")
|
||||||
|
}
|
||||||
|
|
||||||
|
func skipQualifierAndTypedef(typ Type) Type {
|
||||||
|
result := typ
|
||||||
|
for depth := 0; depth <= maxTypeDepth; depth++ {
|
||||||
|
switch v := (result).(type) {
|
||||||
|
case qualifier:
|
||||||
|
result = v.qualify()
|
||||||
|
case *Typedef:
|
||||||
|
result = v.Type
|
||||||
|
default:
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return typ
|
||||||
|
}
|
|
@ -0,0 +1,8 @@
|
||||||
|
// Package btf handles data encoded according to the BPF Type Format.
|
||||||
|
//
|
||||||
|
// The canonical documentation lives in the Linux kernel repository and is
|
||||||
|
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
|
||||||
|
//
|
||||||
|
// The API is very much unstable. You should only use this via the main
|
||||||
|
// ebpf library.
|
||||||
|
package btf
|
|
@ -0,0 +1,281 @@
|
||||||
|
package btf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/asm"
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
type btfExtHeader struct {
|
||||||
|
Magic uint16
|
||||||
|
Version uint8
|
||||||
|
Flags uint8
|
||||||
|
HdrLen uint32
|
||||||
|
|
||||||
|
FuncInfoOff uint32
|
||||||
|
FuncInfoLen uint32
|
||||||
|
LineInfoOff uint32
|
||||||
|
LineInfoLen uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfExtCoreHeader struct {
|
||||||
|
CoreReloOff uint32
|
||||||
|
CoreReloLen uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) {
|
||||||
|
var header btfExtHeader
|
||||||
|
var coreHeader btfExtCoreHeader
|
||||||
|
if err := binary.Read(r, bo, &header); err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("can't read header: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Magic != btfMagic {
|
||||||
|
return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Version != 1 {
|
||||||
|
return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Flags != 0 {
|
||||||
|
return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
||||||
|
if remainder < 0 {
|
||||||
|
return nil, nil, nil, errors.New("header is too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
coreHdrSize := int64(binary.Size(&coreHeader))
|
||||||
|
if remainder >= coreHdrSize {
|
||||||
|
if err := binary.Read(r, bo, &coreHeader); err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err)
|
||||||
|
}
|
||||||
|
remainder -= coreHdrSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// Of course, the .BTF.ext header has different semantics than the
|
||||||
|
// .BTF ext header. We need to ignore non-null values.
|
||||||
|
_, err = io.CopyN(ioutil.Discard, r, remainder)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("header padding: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen)))
|
||||||
|
funcInfo, err = parseExtInfo(buf, bo, strings)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("function info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen)))
|
||||||
|
lineInfo, err = parseExtInfo(buf, bo, strings)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("line info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
|
||||||
|
if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return funcInfo, lineInfo, coreRelos, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfExtInfoSec struct {
|
||||||
|
SecNameOff uint32
|
||||||
|
NumInfo uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type extInfoRecord struct {
|
||||||
|
InsnOff uint64
|
||||||
|
Opaque []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type extInfo struct {
|
||||||
|
recordSize uint32
|
||||||
|
records []extInfoRecord
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
|
||||||
|
if other.recordSize != ei.recordSize {
|
||||||
|
return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
records := make([]extInfoRecord, 0, len(ei.records)+len(other.records))
|
||||||
|
records = append(records, ei.records...)
|
||||||
|
for _, info := range other.records {
|
||||||
|
records = append(records, extInfoRecord{
|
||||||
|
InsnOff: info.InsnOff + offset,
|
||||||
|
Opaque: info.Opaque,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return extInfo{ei.recordSize, records}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ei extInfo) MarshalBinary() ([]byte, error) {
|
||||||
|
if len(ei.records) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records)))
|
||||||
|
for _, info := range ei.records {
|
||||||
|
// The kernel expects offsets in number of raw bpf instructions,
|
||||||
|
// while the ELF tracks it in bytes.
|
||||||
|
insnOff := uint32(info.InsnOff / asm.InstructionSize)
|
||||||
|
if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
|
||||||
|
return nil, fmt.Errorf("can't write instruction offset: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.Write(info.Opaque)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
|
||||||
|
const maxRecordSize = 256
|
||||||
|
|
||||||
|
var recordSize uint32
|
||||||
|
if err := binary.Read(r, bo, &recordSize); err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read record size: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if recordSize < 4 {
|
||||||
|
// Need at least insnOff
|
||||||
|
return nil, errors.New("record size too short")
|
||||||
|
}
|
||||||
|
if recordSize > maxRecordSize {
|
||||||
|
return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make(map[string]extInfo)
|
||||||
|
for {
|
||||||
|
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var records []extInfoRecord
|
||||||
|
for i := uint32(0); i < infoHeader.NumInfo; i++ {
|
||||||
|
var byteOff uint32
|
||||||
|
if err := binary.Read(r, bo, &byteOff); err != nil {
|
||||||
|
return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, int(recordSize-4))
|
||||||
|
if _, err := io.ReadFull(r, buf); err != nil {
|
||||||
|
return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if byteOff%asm.InstructionSize != 0 {
|
||||||
|
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
|
||||||
|
}
|
||||||
|
|
||||||
|
records = append(records, extInfoRecord{uint64(byteOff), buf})
|
||||||
|
}
|
||||||
|
|
||||||
|
result[secName] = extInfo{
|
||||||
|
recordSize,
|
||||||
|
records,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
|
||||||
|
type bpfCoreRelo struct {
|
||||||
|
InsnOff uint32
|
||||||
|
TypeID TypeID
|
||||||
|
AccessStrOff uint32
|
||||||
|
ReloKind coreReloKind
|
||||||
|
}
|
||||||
|
|
||||||
|
type bpfCoreRelos []bpfCoreRelo
|
||||||
|
|
||||||
|
// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
|
||||||
|
// by offset.
|
||||||
|
func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
|
||||||
|
result := make([]bpfCoreRelo, 0, len(r)+len(other))
|
||||||
|
result = append(result, r...)
|
||||||
|
for _, relo := range other {
|
||||||
|
relo.InsnOff += uint32(offset)
|
||||||
|
result = append(result, relo)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
var extInfoReloSize = binary.Size(bpfCoreRelo{})
|
||||||
|
|
||||||
|
func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) {
|
||||||
|
var recordSize uint32
|
||||||
|
if err := binary.Read(r, bo, &recordSize); err != nil {
|
||||||
|
return nil, fmt.Errorf("read record size: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if recordSize != uint32(extInfoReloSize) {
|
||||||
|
return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make(map[string]bpfCoreRelos)
|
||||||
|
for {
|
||||||
|
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var relos []bpfCoreRelo
|
||||||
|
for i := uint32(0); i < infoHeader.NumInfo; i++ {
|
||||||
|
var relo bpfCoreRelo
|
||||||
|
if err := binary.Read(r, bo, &relo); err != nil {
|
||||||
|
return nil, fmt.Errorf("section %v: read record: %v", secName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if relo.InsnOff%asm.InstructionSize != 0 {
|
||||||
|
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
|
||||||
|
}
|
||||||
|
|
||||||
|
relos = append(relos, relo)
|
||||||
|
}
|
||||||
|
|
||||||
|
result[secName] = relos
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
|
||||||
|
var infoHeader btfExtInfoSec
|
||||||
|
if err := binary.Read(r, bo, &infoHeader); err != nil {
|
||||||
|
return "", nil, fmt.Errorf("read ext info header: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
secName, err := strings.Lookup(infoHeader.SecNameOff)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, fmt.Errorf("get section name: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if infoHeader.NumInfo == 0 {
|
||||||
|
return "", nil, fmt.Errorf("section %s has zero records", secName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return secName, &infoHeader, nil
|
||||||
|
}
|
|
@ -0,0 +1,49 @@
|
||||||
|
// +build gofuzz
|
||||||
|
|
||||||
|
// Use with https://github.com/dvyukov/go-fuzz
|
||||||
|
|
||||||
|
package btf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
func FuzzSpec(data []byte) int {
|
||||||
|
if len(data) < binary.Size(btfHeader{}) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
if spec != nil {
|
||||||
|
panic("spec is not nil")
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
if spec == nil {
|
||||||
|
panic("spec is nil")
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzExtInfo(data []byte) int {
|
||||||
|
if len(data) < binary.Size(btfExtHeader{}) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
table := stringTable("\x00foo\x00barfoo\x00")
|
||||||
|
info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
|
||||||
|
if err != nil {
|
||||||
|
if info != nil {
|
||||||
|
panic("info is not nil")
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
if info == nil {
|
||||||
|
panic("info is nil")
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
|
@ -0,0 +1,60 @@
|
||||||
|
package btf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
type stringTable []byte
|
||||||
|
|
||||||
|
func readStringTable(r io.Reader) (stringTable, error) {
|
||||||
|
contents, err := ioutil.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read string table: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(contents) < 1 {
|
||||||
|
return nil, errors.New("string table is empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if contents[0] != '\x00' {
|
||||||
|
return nil, errors.New("first item in string table is non-empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if contents[len(contents)-1] != '\x00' {
|
||||||
|
return nil, errors.New("string table isn't null terminated")
|
||||||
|
}
|
||||||
|
|
||||||
|
return stringTable(contents), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (st stringTable) Lookup(offset uint32) (string, error) {
|
||||||
|
if int64(offset) > int64(^uint(0)>>1) {
|
||||||
|
return "", fmt.Errorf("offset %d overflows int", offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
pos := int(offset)
|
||||||
|
if pos >= len(st) {
|
||||||
|
return "", fmt.Errorf("offset %d is out of bounds", offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pos > 0 && st[pos-1] != '\x00' {
|
||||||
|
return "", fmt.Errorf("offset %d isn't start of a string", offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
str := st[pos:]
|
||||||
|
end := bytes.IndexByte(str, '\x00')
|
||||||
|
if end == -1 {
|
||||||
|
return "", fmt.Errorf("offset %d isn't null terminated", offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(str[:end]), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (st stringTable) LookupName(offset uint32) (Name, error) {
|
||||||
|
str, err := st.Lookup(offset)
|
||||||
|
return Name(str), err
|
||||||
|
}
|
|
@ -0,0 +1,871 @@
|
||||||
|
package btf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const maxTypeDepth = 32
|
||||||
|
|
||||||
|
// TypeID identifies a type in a BTF section.
|
||||||
|
type TypeID uint32
|
||||||
|
|
||||||
|
// ID implements part of the Type interface.
|
||||||
|
func (tid TypeID) ID() TypeID {
|
||||||
|
return tid
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type represents a type described by BTF.
|
||||||
|
type Type interface {
|
||||||
|
ID() TypeID
|
||||||
|
|
||||||
|
String() string
|
||||||
|
|
||||||
|
// Make a copy of the type, without copying Type members.
|
||||||
|
copy() Type
|
||||||
|
|
||||||
|
// Enumerate all nested Types. Repeated calls must visit nested
|
||||||
|
// types in the same order.
|
||||||
|
walk(*typeDeque)
|
||||||
|
}
|
||||||
|
|
||||||
|
// namedType is a type with a name.
|
||||||
|
//
|
||||||
|
// Most named types simply embed Name.
|
||||||
|
type namedType interface {
|
||||||
|
Type
|
||||||
|
name() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name identifies a type.
|
||||||
|
//
|
||||||
|
// Anonymous types have an empty name.
|
||||||
|
type Name string
|
||||||
|
|
||||||
|
func (n Name) name() string {
|
||||||
|
return string(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Void is the unit type of BTF.
|
||||||
|
type Void struct{}
|
||||||
|
|
||||||
|
func (v *Void) ID() TypeID { return 0 }
|
||||||
|
func (v *Void) String() string { return "void#0" }
|
||||||
|
func (v *Void) size() uint32 { return 0 }
|
||||||
|
func (v *Void) copy() Type { return (*Void)(nil) }
|
||||||
|
func (v *Void) walk(*typeDeque) {}
|
||||||
|
|
||||||
|
type IntEncoding byte
|
||||||
|
|
||||||
|
const (
|
||||||
|
Signed IntEncoding = 1 << iota
|
||||||
|
Char
|
||||||
|
Bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// Int is an integer of a given length.
|
||||||
|
type Int struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
|
||||||
|
// The size of the integer in bytes.
|
||||||
|
Size uint32
|
||||||
|
Encoding IntEncoding
|
||||||
|
// Offset is the starting bit offset. Currently always 0.
|
||||||
|
// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
|
||||||
|
Offset uint32
|
||||||
|
Bits byte
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ namedType = (*Int)(nil)
|
||||||
|
|
||||||
|
func (i *Int) String() string {
|
||||||
|
var s strings.Builder
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case i.Encoding&Char != 0:
|
||||||
|
s.WriteString("char")
|
||||||
|
case i.Encoding&Bool != 0:
|
||||||
|
s.WriteString("bool")
|
||||||
|
default:
|
||||||
|
if i.Encoding&Signed == 0 {
|
||||||
|
s.WriteRune('u')
|
||||||
|
}
|
||||||
|
s.WriteString("int")
|
||||||
|
fmt.Fprintf(&s, "%d", i.Size*8)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(&s, "#%d", i.TypeID)
|
||||||
|
|
||||||
|
if i.Bits > 0 {
|
||||||
|
fmt.Fprintf(&s, "[bits=%d]", i.Bits)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Int) size() uint32 { return i.Size }
|
||||||
|
func (i *Int) walk(*typeDeque) {}
|
||||||
|
func (i *Int) copy() Type {
|
||||||
|
cpy := *i
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Int) isBitfield() bool {
|
||||||
|
return i.Offset > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pointer is a pointer to another type.
|
||||||
|
type Pointer struct {
|
||||||
|
TypeID
|
||||||
|
Target Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Pointer) String() string {
|
||||||
|
return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Pointer) size() uint32 { return 8 }
|
||||||
|
func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
|
||||||
|
func (p *Pointer) copy() Type {
|
||||||
|
cpy := *p
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Array is an array with a fixed number of elements.
|
||||||
|
type Array struct {
|
||||||
|
TypeID
|
||||||
|
Type Type
|
||||||
|
Nelems uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (arr *Array) String() string {
|
||||||
|
return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) }
|
||||||
|
func (arr *Array) copy() Type {
|
||||||
|
cpy := *arr
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Struct is a compound type of consecutive members.
|
||||||
|
type Struct struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
// The size of the struct including padding, in bytes
|
||||||
|
Size uint32
|
||||||
|
Members []Member
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Struct) String() string {
|
||||||
|
return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Struct) size() uint32 { return s.Size }
|
||||||
|
|
||||||
|
func (s *Struct) walk(tdq *typeDeque) {
|
||||||
|
for i := range s.Members {
|
||||||
|
tdq.push(&s.Members[i].Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Struct) copy() Type {
|
||||||
|
cpy := *s
|
||||||
|
cpy.Members = make([]Member, len(s.Members))
|
||||||
|
copy(cpy.Members, s.Members)
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Struct) members() []Member {
|
||||||
|
return s.Members
|
||||||
|
}
|
||||||
|
|
||||||
|
// Union is a compound type where members occupy the same memory.
|
||||||
|
type Union struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
// The size of the union including padding, in bytes.
|
||||||
|
Size uint32
|
||||||
|
Members []Member
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Union) String() string {
|
||||||
|
return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Union) size() uint32 { return u.Size }
|
||||||
|
|
||||||
|
func (u *Union) walk(tdq *typeDeque) {
|
||||||
|
for i := range u.Members {
|
||||||
|
tdq.push(&u.Members[i].Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Union) copy() Type {
|
||||||
|
cpy := *u
|
||||||
|
cpy.Members = make([]Member, len(u.Members))
|
||||||
|
copy(cpy.Members, u.Members)
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Union) members() []Member {
|
||||||
|
return u.Members
|
||||||
|
}
|
||||||
|
|
||||||
|
type composite interface {
|
||||||
|
members() []Member
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ composite = (*Struct)(nil)
|
||||||
|
_ composite = (*Union)(nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Member is part of a Struct or Union.
|
||||||
|
//
|
||||||
|
// It is not a valid Type.
|
||||||
|
type Member struct {
|
||||||
|
Name
|
||||||
|
Type Type
|
||||||
|
// Offset is the bit offset of this member
|
||||||
|
Offset uint32
|
||||||
|
BitfieldSize uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enum lists possible values.
|
||||||
|
type Enum struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
Values []EnumValue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Enum) String() string {
|
||||||
|
return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumValue is part of an Enum
|
||||||
|
//
|
||||||
|
// Is is not a valid Type
|
||||||
|
type EnumValue struct {
|
||||||
|
Name
|
||||||
|
Value int32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Enum) size() uint32 { return 4 }
|
||||||
|
func (e *Enum) walk(*typeDeque) {}
|
||||||
|
func (e *Enum) copy() Type {
|
||||||
|
cpy := *e
|
||||||
|
cpy.Values = make([]EnumValue, len(e.Values))
|
||||||
|
copy(cpy.Values, e.Values)
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// FwdKind is the type of forward declaration.
|
||||||
|
type FwdKind int
|
||||||
|
|
||||||
|
// Valid types of forward declaration.
|
||||||
|
const (
|
||||||
|
FwdStruct FwdKind = iota
|
||||||
|
FwdUnion
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fk FwdKind) String() string {
|
||||||
|
switch fk {
|
||||||
|
case FwdStruct:
|
||||||
|
return "struct"
|
||||||
|
case FwdUnion:
|
||||||
|
return "union"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("%T(%d)", fk, int(fk))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fwd is a forward declaration of a Type.
|
||||||
|
type Fwd struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
Kind FwdKind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fwd) String() string {
|
||||||
|
return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fwd) walk(*typeDeque) {}
|
||||||
|
func (f *Fwd) copy() Type {
|
||||||
|
cpy := *f
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Typedef is an alias of a Type.
|
||||||
|
type Typedef struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (td *Typedef) String() string {
|
||||||
|
return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
|
||||||
|
func (td *Typedef) copy() Type {
|
||||||
|
cpy := *td
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Volatile is a qualifier.
|
||||||
|
type Volatile struct {
|
||||||
|
TypeID
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Volatile) String() string {
|
||||||
|
return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Volatile) qualify() Type { return v.Type }
|
||||||
|
func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
|
||||||
|
func (v *Volatile) copy() Type {
|
||||||
|
cpy := *v
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Const is a qualifier.
|
||||||
|
type Const struct {
|
||||||
|
TypeID
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Const) String() string {
|
||||||
|
return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Const) qualify() Type { return c.Type }
|
||||||
|
func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
|
||||||
|
func (c *Const) copy() Type {
|
||||||
|
cpy := *c
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restrict is a qualifier.
|
||||||
|
type Restrict struct {
|
||||||
|
TypeID
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Restrict) String() string {
|
||||||
|
return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Restrict) qualify() Type { return r.Type }
|
||||||
|
func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
|
||||||
|
func (r *Restrict) copy() Type {
|
||||||
|
cpy := *r
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Func is a function definition.
|
||||||
|
type Func struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Func) String() string {
|
||||||
|
return fmt.Sprintf("func#%d[%q proto=#%d]", f.TypeID, f.Name, f.Type.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
|
||||||
|
func (f *Func) copy() Type {
|
||||||
|
cpy := *f
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// FuncProto is a function declaration.
|
||||||
|
type FuncProto struct {
|
||||||
|
TypeID
|
||||||
|
Return Type
|
||||||
|
Params []FuncParam
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fp *FuncProto) String() string {
|
||||||
|
var s strings.Builder
|
||||||
|
fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
|
||||||
|
for _, param := range fp.Params {
|
||||||
|
fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
|
||||||
|
return s.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fp *FuncProto) walk(tdq *typeDeque) {
|
||||||
|
tdq.push(&fp.Return)
|
||||||
|
for i := range fp.Params {
|
||||||
|
tdq.push(&fp.Params[i].Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fp *FuncProto) copy() Type {
|
||||||
|
cpy := *fp
|
||||||
|
cpy.Params = make([]FuncParam, len(fp.Params))
|
||||||
|
copy(cpy.Params, fp.Params)
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
type FuncParam struct {
|
||||||
|
Name
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
// Var is a global variable.
|
||||||
|
type Var struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Var) String() string {
|
||||||
|
// TODO: Linkage
|
||||||
|
return fmt.Sprintf("var#%d[%q]", v.TypeID, v.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
|
||||||
|
func (v *Var) copy() Type {
|
||||||
|
cpy := *v
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// Datasec is a global program section containing data.
|
||||||
|
type Datasec struct {
|
||||||
|
TypeID
|
||||||
|
Name
|
||||||
|
Size uint32
|
||||||
|
Vars []VarSecinfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ds *Datasec) String() string {
|
||||||
|
return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ds *Datasec) size() uint32 { return ds.Size }
|
||||||
|
|
||||||
|
func (ds *Datasec) walk(tdq *typeDeque) {
|
||||||
|
for i := range ds.Vars {
|
||||||
|
tdq.push(&ds.Vars[i].Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ds *Datasec) copy() Type {
|
||||||
|
cpy := *ds
|
||||||
|
cpy.Vars = make([]VarSecinfo, len(ds.Vars))
|
||||||
|
copy(cpy.Vars, ds.Vars)
|
||||||
|
return &cpy
|
||||||
|
}
|
||||||
|
|
||||||
|
// VarSecinfo describes variable in a Datasec
|
||||||
|
//
|
||||||
|
// It is not a valid Type.
|
||||||
|
type VarSecinfo struct {
|
||||||
|
Type Type
|
||||||
|
Offset uint32
|
||||||
|
Size uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type sizer interface {
|
||||||
|
size() uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ sizer = (*Int)(nil)
|
||||||
|
_ sizer = (*Pointer)(nil)
|
||||||
|
_ sizer = (*Struct)(nil)
|
||||||
|
_ sizer = (*Union)(nil)
|
||||||
|
_ sizer = (*Enum)(nil)
|
||||||
|
_ sizer = (*Datasec)(nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
type qualifier interface {
|
||||||
|
qualify() Type
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ qualifier = (*Const)(nil)
|
||||||
|
_ qualifier = (*Restrict)(nil)
|
||||||
|
_ qualifier = (*Volatile)(nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sizeof returns the size of a type in bytes.
|
||||||
|
//
|
||||||
|
// Returns an error if the size can't be computed.
|
||||||
|
func Sizeof(typ Type) (int, error) {
|
||||||
|
var (
|
||||||
|
n = int64(1)
|
||||||
|
elem int64
|
||||||
|
)
|
||||||
|
|
||||||
|
for i := 0; i < maxTypeDepth; i++ {
|
||||||
|
switch v := typ.(type) {
|
||||||
|
case *Array:
|
||||||
|
if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
|
||||||
|
return 0, errors.New("overflow")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Arrays may be of zero length, which allows
|
||||||
|
// n to be zero as well.
|
||||||
|
n *= int64(v.Nelems)
|
||||||
|
typ = v.Type
|
||||||
|
continue
|
||||||
|
|
||||||
|
case sizer:
|
||||||
|
elem = int64(v.size())
|
||||||
|
|
||||||
|
case *Typedef:
|
||||||
|
typ = v.Type
|
||||||
|
continue
|
||||||
|
|
||||||
|
case qualifier:
|
||||||
|
typ = v.qualify()
|
||||||
|
continue
|
||||||
|
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("unrecognized type %T", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
if n > 0 && elem > math.MaxInt64/n {
|
||||||
|
return 0, errors.New("overflow")
|
||||||
|
}
|
||||||
|
|
||||||
|
size := n * elem
|
||||||
|
if int64(int(size)) != size {
|
||||||
|
return 0, errors.New("overflow")
|
||||||
|
}
|
||||||
|
|
||||||
|
return int(size), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, errors.New("exceeded type depth")
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy a Type recursively.
|
||||||
|
//
|
||||||
|
// typ may form a cycle.
|
||||||
|
func copyType(typ Type) Type {
|
||||||
|
var (
|
||||||
|
copies = make(map[Type]Type)
|
||||||
|
work typeDeque
|
||||||
|
)
|
||||||
|
|
||||||
|
for t := &typ; t != nil; t = work.pop() {
|
||||||
|
// *t is the identity of the type.
|
||||||
|
if cpy := copies[*t]; cpy != nil {
|
||||||
|
*t = cpy
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
cpy := (*t).copy()
|
||||||
|
copies[*t] = cpy
|
||||||
|
*t = cpy
|
||||||
|
|
||||||
|
// Mark any nested types for copying.
|
||||||
|
cpy.walk(&work)
|
||||||
|
}
|
||||||
|
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeDeque keeps track of pointers to types which still
|
||||||
|
// need to be visited.
|
||||||
|
type typeDeque struct {
|
||||||
|
types []*Type
|
||||||
|
read, write uint64
|
||||||
|
mask uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// push adds a type to the stack.
|
||||||
|
func (dq *typeDeque) push(t *Type) {
|
||||||
|
if dq.write-dq.read < uint64(len(dq.types)) {
|
||||||
|
dq.types[dq.write&dq.mask] = t
|
||||||
|
dq.write++
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
new := len(dq.types) * 2
|
||||||
|
if new == 0 {
|
||||||
|
new = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
types := make([]*Type, new)
|
||||||
|
pivot := dq.read & dq.mask
|
||||||
|
n := copy(types, dq.types[pivot:])
|
||||||
|
n += copy(types[n:], dq.types[:pivot])
|
||||||
|
types[n] = t
|
||||||
|
|
||||||
|
dq.types = types
|
||||||
|
dq.mask = uint64(new) - 1
|
||||||
|
dq.read, dq.write = 0, uint64(n+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// shift returns the first element or null.
|
||||||
|
func (dq *typeDeque) shift() *Type {
|
||||||
|
if dq.read == dq.write {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
index := dq.read & dq.mask
|
||||||
|
t := dq.types[index]
|
||||||
|
dq.types[index] = nil
|
||||||
|
dq.read++
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// pop returns the last element or null.
|
||||||
|
func (dq *typeDeque) pop() *Type {
|
||||||
|
if dq.read == dq.write {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dq.write--
|
||||||
|
index := dq.write & dq.mask
|
||||||
|
t := dq.types[index]
|
||||||
|
dq.types[index] = nil
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// all returns all elements.
|
||||||
|
//
|
||||||
|
// The deque is empty after calling this method.
|
||||||
|
func (dq *typeDeque) all() []*Type {
|
||||||
|
length := dq.write - dq.read
|
||||||
|
types := make([]*Type, 0, length)
|
||||||
|
for t := dq.shift(); t != nil; t = dq.shift() {
|
||||||
|
types = append(types, t)
|
||||||
|
}
|
||||||
|
return types
|
||||||
|
}
|
||||||
|
|
||||||
|
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
|
||||||
|
// it into a graph of Types connected via pointers.
|
||||||
|
//
|
||||||
|
// Returns a map of named types (so, where NameOff is non-zero) and a slice of types
|
||||||
|
// indexed by TypeID. Since BTF ignores compilation units, multiple types may share
|
||||||
|
// the same name. A Type may form a cyclic graph by pointing at itself.
|
||||||
|
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) {
|
||||||
|
type fixupDef struct {
|
||||||
|
id TypeID
|
||||||
|
expectedKind btfKind
|
||||||
|
typ *Type
|
||||||
|
}
|
||||||
|
|
||||||
|
var fixups []fixupDef
|
||||||
|
fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
|
||||||
|
fixups = append(fixups, fixupDef{id, expectedKind, typ})
|
||||||
|
}
|
||||||
|
|
||||||
|
convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
|
||||||
|
// NB: The fixup below relies on pre-allocating this array to
|
||||||
|
// work, since otherwise append might re-allocate members.
|
||||||
|
members := make([]Member, 0, len(raw))
|
||||||
|
for i, btfMember := range raw {
|
||||||
|
name, err := rawStrings.LookupName(btfMember.NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
|
||||||
|
}
|
||||||
|
m := Member{
|
||||||
|
Name: name,
|
||||||
|
Offset: btfMember.Offset,
|
||||||
|
}
|
||||||
|
if kindFlag {
|
||||||
|
m.BitfieldSize = btfMember.Offset >> 24
|
||||||
|
m.Offset &= 0xffffff
|
||||||
|
}
|
||||||
|
members = append(members, m)
|
||||||
|
}
|
||||||
|
for i := range members {
|
||||||
|
fixup(raw[i].Type, kindUnknown, &members[i].Type)
|
||||||
|
}
|
||||||
|
return members, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
types = make([]Type, 0, len(rawTypes))
|
||||||
|
types = append(types, (*Void)(nil))
|
||||||
|
namedTypes = make(map[string][]namedType)
|
||||||
|
|
||||||
|
for i, raw := range rawTypes {
|
||||||
|
var (
|
||||||
|
// Void is defined to always be type ID 0, and is thus
|
||||||
|
// omitted from BTF.
|
||||||
|
id = TypeID(i + 1)
|
||||||
|
typ Type
|
||||||
|
)
|
||||||
|
|
||||||
|
name, err := rawStrings.LookupName(raw.NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch raw.Kind() {
|
||||||
|
case kindInt:
|
||||||
|
encoding, offset, bits := intEncoding(*raw.data.(*uint32))
|
||||||
|
typ = &Int{id, name, raw.Size(), encoding, offset, bits}
|
||||||
|
|
||||||
|
case kindPointer:
|
||||||
|
ptr := &Pointer{id, nil}
|
||||||
|
fixup(raw.Type(), kindUnknown, &ptr.Target)
|
||||||
|
typ = ptr
|
||||||
|
|
||||||
|
case kindArray:
|
||||||
|
btfArr := raw.data.(*btfArray)
|
||||||
|
|
||||||
|
// IndexType is unused according to btf.rst.
|
||||||
|
// Don't make it available right now.
|
||||||
|
arr := &Array{id, nil, btfArr.Nelems}
|
||||||
|
fixup(btfArr.Type, kindUnknown, &arr.Type)
|
||||||
|
typ = arr
|
||||||
|
|
||||||
|
case kindStruct:
|
||||||
|
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
|
||||||
|
}
|
||||||
|
typ = &Struct{id, name, raw.Size(), members}
|
||||||
|
|
||||||
|
case kindUnion:
|
||||||
|
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
|
||||||
|
}
|
||||||
|
typ = &Union{id, name, raw.Size(), members}
|
||||||
|
|
||||||
|
case kindEnum:
|
||||||
|
rawvals := raw.data.([]btfEnum)
|
||||||
|
vals := make([]EnumValue, 0, len(rawvals))
|
||||||
|
for i, btfVal := range rawvals {
|
||||||
|
name, err := rawStrings.LookupName(btfVal.NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
|
||||||
|
}
|
||||||
|
vals = append(vals, EnumValue{
|
||||||
|
Name: name,
|
||||||
|
Value: btfVal.Val,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
typ = &Enum{id, name, vals}
|
||||||
|
|
||||||
|
case kindForward:
|
||||||
|
if raw.KindFlag() {
|
||||||
|
typ = &Fwd{id, name, FwdUnion}
|
||||||
|
} else {
|
||||||
|
typ = &Fwd{id, name, FwdStruct}
|
||||||
|
}
|
||||||
|
|
||||||
|
case kindTypedef:
|
||||||
|
typedef := &Typedef{id, name, nil}
|
||||||
|
fixup(raw.Type(), kindUnknown, &typedef.Type)
|
||||||
|
typ = typedef
|
||||||
|
|
||||||
|
case kindVolatile:
|
||||||
|
volatile := &Volatile{id, nil}
|
||||||
|
fixup(raw.Type(), kindUnknown, &volatile.Type)
|
||||||
|
typ = volatile
|
||||||
|
|
||||||
|
case kindConst:
|
||||||
|
cnst := &Const{id, nil}
|
||||||
|
fixup(raw.Type(), kindUnknown, &cnst.Type)
|
||||||
|
typ = cnst
|
||||||
|
|
||||||
|
case kindRestrict:
|
||||||
|
restrict := &Restrict{id, nil}
|
||||||
|
fixup(raw.Type(), kindUnknown, &restrict.Type)
|
||||||
|
typ = restrict
|
||||||
|
|
||||||
|
case kindFunc:
|
||||||
|
fn := &Func{id, name, nil}
|
||||||
|
fixup(raw.Type(), kindFuncProto, &fn.Type)
|
||||||
|
typ = fn
|
||||||
|
|
||||||
|
case kindFuncProto:
|
||||||
|
rawparams := raw.data.([]btfParam)
|
||||||
|
params := make([]FuncParam, 0, len(rawparams))
|
||||||
|
for i, param := range rawparams {
|
||||||
|
name, err := rawStrings.LookupName(param.NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
|
||||||
|
}
|
||||||
|
params = append(params, FuncParam{
|
||||||
|
Name: name,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for i := range params {
|
||||||
|
fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
fp := &FuncProto{id, nil, params}
|
||||||
|
fixup(raw.Type(), kindUnknown, &fp.Return)
|
||||||
|
typ = fp
|
||||||
|
|
||||||
|
case kindVar:
|
||||||
|
v := &Var{id, name, nil}
|
||||||
|
fixup(raw.Type(), kindUnknown, &v.Type)
|
||||||
|
typ = v
|
||||||
|
|
||||||
|
case kindDatasec:
|
||||||
|
btfVars := raw.data.([]btfVarSecinfo)
|
||||||
|
vars := make([]VarSecinfo, 0, len(btfVars))
|
||||||
|
for _, btfVar := range btfVars {
|
||||||
|
vars = append(vars, VarSecinfo{
|
||||||
|
Offset: btfVar.Offset,
|
||||||
|
Size: btfVar.Size,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for i := range vars {
|
||||||
|
fixup(btfVars[i].Type, kindVar, &vars[i].Type)
|
||||||
|
}
|
||||||
|
typ = &Datasec{id, name, raw.SizeType, vars}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
|
||||||
|
}
|
||||||
|
|
||||||
|
types = append(types, typ)
|
||||||
|
|
||||||
|
if named, ok := typ.(namedType); ok {
|
||||||
|
if name := essentialName(named.name()); name != "" {
|
||||||
|
namedTypes[name] = append(namedTypes[name], named)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fixup := range fixups {
|
||||||
|
i := int(fixup.id)
|
||||||
|
if i >= len(types) {
|
||||||
|
return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default void (id 0) to unknown
|
||||||
|
rawKind := kindUnknown
|
||||||
|
if i > 0 {
|
||||||
|
rawKind = rawTypes[i-1].Kind()
|
||||||
|
}
|
||||||
|
|
||||||
|
if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
|
||||||
|
return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
|
||||||
|
}
|
||||||
|
|
||||||
|
*fixup.typ = types[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
return types, namedTypes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// essentialName returns name without a ___ suffix.
|
||||||
|
func essentialName(name string) string {
|
||||||
|
lastIdx := strings.LastIndex(name, "___")
|
||||||
|
if lastIdx > 0 {
|
||||||
|
return name[:lastIdx]
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
|
@ -0,0 +1,62 @@
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var sysCPU struct {
|
||||||
|
once sync.Once
|
||||||
|
err error
|
||||||
|
num int
|
||||||
|
}
|
||||||
|
|
||||||
|
// PossibleCPUs returns the max number of CPUs a system may possibly have
|
||||||
|
// Logical CPU numbers must be of the form 0-n
|
||||||
|
func PossibleCPUs() (int, error) {
|
||||||
|
sysCPU.once.Do(func() {
|
||||||
|
sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible")
|
||||||
|
})
|
||||||
|
|
||||||
|
return sysCPU.num, sysCPU.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseCPUsFromFile(path string) (int, error) {
|
||||||
|
spec, err := ioutil.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := parseCPUs(string(spec))
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("can't parse %s: %v", path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseCPUs parses the number of cpus from a string produced
|
||||||
|
// by bitmap_list_string() in the Linux kernel.
|
||||||
|
// Multiple ranges are rejected, since they can't be unified
|
||||||
|
// into a single number.
|
||||||
|
// This is the format of /sys/devices/system/cpu/possible, it
|
||||||
|
// is not suitable for /sys/devices/system/cpu/online, etc.
|
||||||
|
func parseCPUs(spec string) (int, error) {
|
||||||
|
if strings.Trim(spec, "\n") == "0" {
|
||||||
|
return 1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var low, high int
|
||||||
|
n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
|
||||||
|
if n != 2 || err != nil {
|
||||||
|
return 0, fmt.Errorf("invalid format: %s", spec)
|
||||||
|
}
|
||||||
|
if low != 0 {
|
||||||
|
return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cpus is 0 indexed
|
||||||
|
return high + 1, nil
|
||||||
|
}
|
|
@ -0,0 +1,52 @@
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"debug/elf"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SafeELFFile struct {
|
||||||
|
*elf.File
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSafeELFFile reads an ELF safely.
|
||||||
|
//
|
||||||
|
// Any panic during parsing is turned into an error. This is necessary since
|
||||||
|
// there are a bunch of unfixed bugs in debug/elf.
|
||||||
|
//
|
||||||
|
// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
|
||||||
|
func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
|
||||||
|
defer func() {
|
||||||
|
r := recover()
|
||||||
|
if r == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
safe = nil
|
||||||
|
err = fmt.Errorf("reading ELF file panicked: %s", r)
|
||||||
|
}()
|
||||||
|
|
||||||
|
file, err := elf.NewFile(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &SafeELFFile{file}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Symbols is the safe version of elf.File.Symbols.
|
||||||
|
func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
|
||||||
|
defer func() {
|
||||||
|
r := recover()
|
||||||
|
if r == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
syms = nil
|
||||||
|
err = fmt.Errorf("reading ELF symbols panicked: %s", r)
|
||||||
|
}()
|
||||||
|
|
||||||
|
syms, err = se.File.Symbols()
|
||||||
|
return
|
||||||
|
}
|
|
@ -0,0 +1,24 @@
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
|
||||||
|
// depending on the host's endianness.
|
||||||
|
var NativeEndian binary.ByteOrder
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if isBigEndian() {
|
||||||
|
NativeEndian = binary.BigEndian
|
||||||
|
} else {
|
||||||
|
NativeEndian = binary.LittleEndian
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isBigEndian() (ret bool) {
|
||||||
|
i := int(0x1)
|
||||||
|
bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i))
|
||||||
|
return bs[0] == 0
|
||||||
|
}
|
|
@ -0,0 +1,47 @@
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrorWithLog returns an error that includes logs from the
|
||||||
|
// kernel verifier.
|
||||||
|
//
|
||||||
|
// logErr should be the error returned by the syscall that generated
|
||||||
|
// the log. It is used to check for truncation of the output.
|
||||||
|
func ErrorWithLog(err error, log []byte, logErr error) error {
|
||||||
|
logStr := strings.Trim(CString(log), "\t\r\n ")
|
||||||
|
if errors.Is(logErr, unix.ENOSPC) {
|
||||||
|
logStr += " (truncated...)"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &VerifierError{err, logStr}
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifierError includes information from the eBPF verifier.
|
||||||
|
type VerifierError struct {
|
||||||
|
cause error
|
||||||
|
log string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (le *VerifierError) Error() string {
|
||||||
|
if le.log == "" {
|
||||||
|
return le.cause.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%s: %s", le.cause, le.log)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CString turns a NUL / zero terminated byte buffer into a string.
|
||||||
|
func CString(in []byte) string {
|
||||||
|
inLen := bytes.IndexByte(in, 0)
|
||||||
|
if inLen == -1 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return string(in[:inLen])
|
||||||
|
}
|
|
@ -0,0 +1,69 @@
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ErrClosedFd = errors.New("use of closed file descriptor")
|
||||||
|
|
||||||
|
type FD struct {
|
||||||
|
raw int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFD(value uint32) *FD {
|
||||||
|
fd := &FD{int64(value)}
|
||||||
|
runtime.SetFinalizer(fd, (*FD).Close)
|
||||||
|
return fd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fd *FD) String() string {
|
||||||
|
return strconv.FormatInt(fd.raw, 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fd *FD) Value() (uint32, error) {
|
||||||
|
if fd.raw < 0 {
|
||||||
|
return 0, ErrClosedFd
|
||||||
|
}
|
||||||
|
|
||||||
|
return uint32(fd.raw), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fd *FD) Close() error {
|
||||||
|
if fd.raw < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
value := int(fd.raw)
|
||||||
|
fd.raw = -1
|
||||||
|
|
||||||
|
fd.Forget()
|
||||||
|
return unix.Close(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fd *FD) Forget() {
|
||||||
|
runtime.SetFinalizer(fd, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fd *FD) Dup() (*FD, error) {
|
||||||
|
if fd.raw < 0 {
|
||||||
|
return nil, ErrClosedFd
|
||||||
|
}
|
||||||
|
|
||||||
|
dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't dup fd: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewFD(uint32(dup)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fd *FD) File(name string) *os.File {
|
||||||
|
fd.Forget()
|
||||||
|
return os.NewFile(uintptr(fd.raw), name)
|
||||||
|
}
|
|
@ -0,0 +1,138 @@
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrNotSupported indicates that a feature is not supported by the current kernel.
|
||||||
|
var ErrNotSupported = errors.New("not supported")
|
||||||
|
|
||||||
|
// UnsupportedFeatureError is returned by FeatureTest() functions.
|
||||||
|
type UnsupportedFeatureError struct {
|
||||||
|
// The minimum Linux mainline version required for this feature.
|
||||||
|
// Used for the error string, and for sanity checking during testing.
|
||||||
|
MinimumVersion Version
|
||||||
|
|
||||||
|
// The name of the feature that isn't supported.
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ufe *UnsupportedFeatureError) Error() string {
|
||||||
|
if ufe.MinimumVersion.Unspecified() {
|
||||||
|
return fmt.Sprintf("%s not supported", ufe.Name)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is indicates that UnsupportedFeatureError is ErrNotSupported.
|
||||||
|
func (ufe *UnsupportedFeatureError) Is(target error) bool {
|
||||||
|
return target == ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
type featureTest struct {
|
||||||
|
sync.RWMutex
|
||||||
|
successful bool
|
||||||
|
result error
|
||||||
|
}
|
||||||
|
|
||||||
|
// FeatureTestFn is used to determine whether the kernel supports
|
||||||
|
// a certain feature.
|
||||||
|
//
|
||||||
|
// The return values have the following semantics:
|
||||||
|
//
|
||||||
|
// err == ErrNotSupported: the feature is not available
|
||||||
|
// err == nil: the feature is available
|
||||||
|
// err != nil: the test couldn't be executed
|
||||||
|
type FeatureTestFn func() error
|
||||||
|
|
||||||
|
// FeatureTest wraps a function so that it is run at most once.
|
||||||
|
//
|
||||||
|
// name should identify the tested feature, while version must be in the
|
||||||
|
// form Major.Minor[.Patch].
|
||||||
|
//
|
||||||
|
// Returns an error wrapping ErrNotSupported if the feature is not supported.
|
||||||
|
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
|
||||||
|
v, err := NewVersion(version)
|
||||||
|
if err != nil {
|
||||||
|
return func() error { return err }
|
||||||
|
}
|
||||||
|
|
||||||
|
ft := new(featureTest)
|
||||||
|
return func() error {
|
||||||
|
ft.RLock()
|
||||||
|
if ft.successful {
|
||||||
|
defer ft.RUnlock()
|
||||||
|
return ft.result
|
||||||
|
}
|
||||||
|
ft.RUnlock()
|
||||||
|
ft.Lock()
|
||||||
|
defer ft.Unlock()
|
||||||
|
// check one more time on the off
|
||||||
|
// chance that two go routines
|
||||||
|
// were able to call into the write
|
||||||
|
// lock
|
||||||
|
if ft.successful {
|
||||||
|
return ft.result
|
||||||
|
}
|
||||||
|
err := fn()
|
||||||
|
switch {
|
||||||
|
case errors.Is(err, ErrNotSupported):
|
||||||
|
ft.result = &UnsupportedFeatureError{
|
||||||
|
MinimumVersion: v,
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case err == nil:
|
||||||
|
ft.successful = true
|
||||||
|
|
||||||
|
default:
|
||||||
|
// We couldn't execute the feature test to a point
|
||||||
|
// where it could make a determination.
|
||||||
|
// Don't cache the result, just return it.
|
||||||
|
return fmt.Errorf("detect support for %s: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ft.result
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Version in the form Major.Minor.Patch.
|
||||||
|
type Version [3]uint16
|
||||||
|
|
||||||
|
// NewVersion creates a version from a string like "Major.Minor.Patch".
|
||||||
|
//
|
||||||
|
// Patch is optional.
|
||||||
|
func NewVersion(ver string) (Version, error) {
|
||||||
|
var major, minor, patch uint16
|
||||||
|
n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
|
||||||
|
if n < 2 {
|
||||||
|
return Version{}, fmt.Errorf("invalid version: %s", ver)
|
||||||
|
}
|
||||||
|
return Version{major, minor, patch}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v Version) String() string {
|
||||||
|
if v[2] == 0 {
|
||||||
|
return fmt.Sprintf("v%d.%d", v[0], v[1])
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Less returns true if the version is less than another version.
|
||||||
|
func (v Version) Less(other Version) bool {
|
||||||
|
for i, a := range v {
|
||||||
|
if a == other[i] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return a < other[i]
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unspecified returns true if the version is all zero.
|
||||||
|
func (v Version) Unspecified() bool {
|
||||||
|
return v[0] == 0 && v[1] == 0 && v[2] == 0
|
||||||
|
}
|
|
@ -0,0 +1,16 @@
package internal

import "errors"

// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.
type DiscardZeroes struct{}

func (DiscardZeroes) Write(p []byte) (int, error) {
    for _, b := range p {
        if b != 0 {
            return 0, errors.New("encountered non-zero byte")
        }
    }
    return len(p), nil
}
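
// Illustrative sketch, not part of the upstream file (assumes an "io" import):
// DiscardZeroes is meant as an io.Writer sink for data that must be all
// zeroes, for example when draining padding bytes from a reader.
func exampleDrainZeroes(r io.Reader) error {
    // io.Copy fails with "encountered non-zero byte" as soon as the
    // reader yields anything other than zero.
    _, err := io.Copy(DiscardZeroes{}, r)
    return err
}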
@ -0,0 +1,30 @@
package internal

import "unsafe"

// NewPointer creates a 64-bit pointer from an unsafe Pointer.
func NewPointer(ptr unsafe.Pointer) Pointer {
    return Pointer{ptr: ptr}
}

// NewSlicePointer creates a 64-bit pointer from a byte slice.
func NewSlicePointer(buf []byte) Pointer {
    if len(buf) == 0 {
        return Pointer{}
    }

    return Pointer{ptr: unsafe.Pointer(&buf[0])}
}

// NewStringPointer creates a 64-bit pointer from a string.
func NewStringPointer(str string) Pointer {
    if str == "" {
        return Pointer{}
    }

    // The kernel expects strings to be zero terminated
    buf := make([]byte, len(str)+1)
    copy(buf, str)

    return Pointer{ptr: unsafe.Pointer(&buf[0])}
}
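
// Illustrative sketch, not part of the upstream file: bpf(2) attribute structs
// embed Pointer so their layout matches what the kernel expects on both 32-bit
// and 64-bit platforms. exampleAttr and its field names are assumptions.
type exampleAttr struct {
    pathname Pointer
    flags    uint32
}

func newExampleAttr(path string) exampleAttr {
    // NewStringPointer copies path into a NUL-terminated buffer, since the
    // kernel expects zero-terminated strings.
    return exampleAttr{pathname: NewStringPointer(path)}
}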
@ -0,0 +1,14 @@
// +build armbe mips mips64p32

package internal

import (
    "unsafe"
)

// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
type Pointer struct {
    pad uint32
    ptr unsafe.Pointer
}
@ -0,0 +1,14 @@
// +build 386 amd64p32 arm mipsle mips64p32le

package internal

import (
    "unsafe"
)

// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
type Pointer struct {
    ptr unsafe.Pointer
    pad uint32
}
@ -0,0 +1,14 @@
// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le
// +build !armbe,!mips,!mips64p32

package internal

import (
    "unsafe"
)

// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
type Pointer struct {
    ptr unsafe.Pointer
}
@ -0,0 +1,179 @@
package internal

import (
    "fmt"
    "path/filepath"
    "runtime"
    "unsafe"

    "github.com/cilium/ebpf/internal/unix"
)

//go:generate stringer -output syscall_string.go -type=BPFCmd

// BPFCmd identifies a subcommand of the bpf syscall.
type BPFCmd int

// Well known BPF commands.
const (
    BPF_MAP_CREATE BPFCmd = iota
    BPF_MAP_LOOKUP_ELEM
    BPF_MAP_UPDATE_ELEM
    BPF_MAP_DELETE_ELEM
    BPF_MAP_GET_NEXT_KEY
    BPF_PROG_LOAD
    BPF_OBJ_PIN
    BPF_OBJ_GET
    BPF_PROG_ATTACH
    BPF_PROG_DETACH
    BPF_PROG_TEST_RUN
    BPF_PROG_GET_NEXT_ID
    BPF_MAP_GET_NEXT_ID
    BPF_PROG_GET_FD_BY_ID
    BPF_MAP_GET_FD_BY_ID
    BPF_OBJ_GET_INFO_BY_FD
    BPF_PROG_QUERY
    BPF_RAW_TRACEPOINT_OPEN
    BPF_BTF_LOAD
    BPF_BTF_GET_FD_BY_ID
    BPF_TASK_FD_QUERY
    BPF_MAP_LOOKUP_AND_DELETE_ELEM
    BPF_MAP_FREEZE
    BPF_BTF_GET_NEXT_ID
    BPF_MAP_LOOKUP_BATCH
    BPF_MAP_LOOKUP_AND_DELETE_BATCH
    BPF_MAP_UPDATE_BATCH
    BPF_MAP_DELETE_BATCH
    BPF_LINK_CREATE
    BPF_LINK_UPDATE
    BPF_LINK_GET_FD_BY_ID
    BPF_LINK_GET_NEXT_ID
    BPF_ENABLE_STATS
    BPF_ITER_CREATE
)

// BPF wraps SYS_BPF.
//
// Any pointers contained in attr must use the Pointer type from this package.
func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
    r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
    runtime.KeepAlive(attr)

    var err error
    if errNo != 0 {
        err = errNo
    }

    return r1, err
}

type BPFProgAttachAttr struct {
    TargetFd     uint32
    AttachBpfFd  uint32
    AttachType   uint32
    AttachFlags  uint32
    ReplaceBpfFd uint32
}

func BPFProgAttach(attr *BPFProgAttachAttr) error {
    _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
    return err
}

type BPFProgDetachAttr struct {
    TargetFd    uint32
    AttachBpfFd uint32
    AttachType  uint32
}

func BPFProgDetach(attr *BPFProgDetachAttr) error {
    _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
    return err
}

type BPFEnableStatsAttr struct {
    StatsType uint32
}

func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) {
    ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
    if err != nil {
        return nil, fmt.Errorf("enable stats: %w", err)
    }
    return NewFD(uint32(ptr)), nil
}

type bpfObjAttr struct {
    fileName  Pointer
    fd        uint32
    fileFlags uint32
}

const bpfFSType = 0xcafe4a11

// BPFObjPin wraps BPF_OBJ_PIN.
func BPFObjPin(fileName string, fd *FD) error {
    dirName := filepath.Dir(fileName)
    var statfs unix.Statfs_t
    if err := unix.Statfs(dirName, &statfs); err != nil {
        return err
    }
    if uint64(statfs.Type) != bpfFSType {
        return fmt.Errorf("%s is not on a bpf filesystem", fileName)
    }

    value, err := fd.Value()
    if err != nil {
        return err
    }

    attr := bpfObjAttr{
        fileName: NewStringPointer(fileName),
        fd:       value,
    }
    _, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    if err != nil {
        return fmt.Errorf("pin object %s: %w", fileName, err)
    }
    return nil
}

// BPFObjGet wraps BPF_OBJ_GET.
func BPFObjGet(fileName string) (*FD, error) {
    attr := bpfObjAttr{
        fileName: NewStringPointer(fileName),
    }
    ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    if err != nil {
        return nil, fmt.Errorf("get object %s: %w", fileName, err)
    }
    return NewFD(uint32(ptr)), nil
}

type bpfObjGetInfoByFDAttr struct {
    fd      uint32
    infoLen uint32
    info    Pointer
}

// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD.
//
// Available from 4.13.
func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error {
    value, err := fd.Value()
    if err != nil {
        return err
    }

    attr := bpfObjGetInfoByFDAttr{
        fd:      value,
        infoLen: uint32(size),
        info:    NewPointer(info),
    }
    _, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    if err != nil {
        return fmt.Errorf("fd %v: %w", fd, err)
    }
    return nil
}
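
// Illustrative sketch, not part of the upstream file: pinning an object into a
// bpffs mount. The path is an assumption; BPFObjPin rejects paths whose parent
// directory is not on a bpf filesystem.
func examplePin(fd *FD) error {
    return BPFObjPin("/sys/fs/bpf/my_object", fd)
}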
@ -0,0 +1,56 @@
// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT.

package internal

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[BPF_MAP_CREATE-0]
    _ = x[BPF_MAP_LOOKUP_ELEM-1]
    _ = x[BPF_MAP_UPDATE_ELEM-2]
    _ = x[BPF_MAP_DELETE_ELEM-3]
    _ = x[BPF_MAP_GET_NEXT_KEY-4]
    _ = x[BPF_PROG_LOAD-5]
    _ = x[BPF_OBJ_PIN-6]
    _ = x[BPF_OBJ_GET-7]
    _ = x[BPF_PROG_ATTACH-8]
    _ = x[BPF_PROG_DETACH-9]
    _ = x[BPF_PROG_TEST_RUN-10]
    _ = x[BPF_PROG_GET_NEXT_ID-11]
    _ = x[BPF_MAP_GET_NEXT_ID-12]
    _ = x[BPF_PROG_GET_FD_BY_ID-13]
    _ = x[BPF_MAP_GET_FD_BY_ID-14]
    _ = x[BPF_OBJ_GET_INFO_BY_FD-15]
    _ = x[BPF_PROG_QUERY-16]
    _ = x[BPF_RAW_TRACEPOINT_OPEN-17]
    _ = x[BPF_BTF_LOAD-18]
    _ = x[BPF_BTF_GET_FD_BY_ID-19]
    _ = x[BPF_TASK_FD_QUERY-20]
    _ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21]
    _ = x[BPF_MAP_FREEZE-22]
    _ = x[BPF_BTF_GET_NEXT_ID-23]
    _ = x[BPF_MAP_LOOKUP_BATCH-24]
    _ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25]
    _ = x[BPF_MAP_UPDATE_BATCH-26]
    _ = x[BPF_MAP_DELETE_BATCH-27]
    _ = x[BPF_LINK_CREATE-28]
    _ = x[BPF_LINK_UPDATE-29]
    _ = x[BPF_LINK_GET_FD_BY_ID-30]
    _ = x[BPF_LINK_GET_NEXT_ID-31]
    _ = x[BPF_ENABLE_STATS-32]
    _ = x[BPF_ITER_CREATE-33]
}

const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE"

var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617}

func (i BPFCmd) String() string {
    if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) {
        return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")"
    }
    return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]]
}
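
// Illustrative sketch, not part of the upstream file: the generated String
// method maps a BPFCmd to its constant name and falls back to a numeric form
// for values outside the known range.
func exampleCmdStrings() (string, string) {
    return BPF_LINK_CREATE.String(), BPFCmd(99).String() // "BPF_LINK_CREATE", "BPFCmd(99)"
}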
@ -0,0 +1,170 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package unix
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
linux "golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ENOENT = linux.ENOENT
|
||||||
|
EEXIST = linux.EEXIST
|
||||||
|
EAGAIN = linux.EAGAIN
|
||||||
|
ENOSPC = linux.ENOSPC
|
||||||
|
EINVAL = linux.EINVAL
|
||||||
|
EPOLLIN = linux.EPOLLIN
|
||||||
|
EINTR = linux.EINTR
|
||||||
|
EPERM = linux.EPERM
|
||||||
|
ESRCH = linux.ESRCH
|
||||||
|
ENODEV = linux.ENODEV
|
||||||
|
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
|
||||||
|
ENOTSUPP = syscall.Errno(0x20c)
|
||||||
|
|
||||||
|
EBADF = linux.EBADF
|
||||||
|
BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
|
||||||
|
BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
|
||||||
|
BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
|
||||||
|
BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
|
||||||
|
BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
|
||||||
|
BPF_TAG_SIZE = linux.BPF_TAG_SIZE
|
||||||
|
SYS_BPF = linux.SYS_BPF
|
||||||
|
F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
|
||||||
|
EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
|
||||||
|
EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC
|
||||||
|
O_CLOEXEC = linux.O_CLOEXEC
|
||||||
|
O_NONBLOCK = linux.O_NONBLOCK
|
||||||
|
PROT_READ = linux.PROT_READ
|
||||||
|
PROT_WRITE = linux.PROT_WRITE
|
||||||
|
MAP_SHARED = linux.MAP_SHARED
|
||||||
|
PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
|
||||||
|
PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
|
||||||
|
PerfBitWatermark = linux.PerfBitWatermark
|
||||||
|
PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
|
||||||
|
PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
|
||||||
|
RLIM_INFINITY = linux.RLIM_INFINITY
|
||||||
|
RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
|
||||||
|
BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
|
||||||
|
)
|
||||||
|
|
||||||
|
// Statfs_t is a wrapper
|
||||||
|
type Statfs_t = linux.Statfs_t
|
||||||
|
|
||||||
|
// Rlimit is a wrapper
|
||||||
|
type Rlimit = linux.Rlimit
|
||||||
|
|
||||||
|
// Setrlimit is a wrapper
|
||||||
|
func Setrlimit(resource int, rlim *Rlimit) (err error) {
|
||||||
|
return linux.Setrlimit(resource, rlim)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Syscall is a wrapper
|
||||||
|
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
|
||||||
|
return linux.Syscall(trap, a1, a2, a3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FcntlInt is a wrapper
|
||||||
|
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
|
||||||
|
return linux.FcntlInt(fd, cmd, arg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Statfs is a wrapper
|
||||||
|
func Statfs(path string, buf *Statfs_t) (err error) {
|
||||||
|
return linux.Statfs(path, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close is a wrapper
|
||||||
|
func Close(fd int) (err error) {
|
||||||
|
return linux.Close(fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollEvent is a wrapper
|
||||||
|
type EpollEvent = linux.EpollEvent
|
||||||
|
|
||||||
|
// EpollWait is a wrapper
|
||||||
|
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
|
||||||
|
return linux.EpollWait(epfd, events, msec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollCtl is a wrapper
|
||||||
|
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
|
||||||
|
return linux.EpollCtl(epfd, op, fd, event)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Eventfd is a wrapper
|
||||||
|
func Eventfd(initval uint, flags int) (fd int, err error) {
|
||||||
|
return linux.Eventfd(initval, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write is a wrapper
|
||||||
|
func Write(fd int, p []byte) (n int, err error) {
|
||||||
|
return linux.Write(fd, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollCreate1 is a wrapper
|
||||||
|
func EpollCreate1(flag int) (fd int, err error) {
|
||||||
|
return linux.EpollCreate1(flag)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PerfEventMmapPage is a wrapper
|
||||||
|
type PerfEventMmapPage linux.PerfEventMmapPage
|
||||||
|
|
||||||
|
// SetNonblock is a wrapper
|
||||||
|
func SetNonblock(fd int, nonblocking bool) (err error) {
|
||||||
|
return linux.SetNonblock(fd, nonblocking)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mmap is a wrapper
|
||||||
|
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||||
|
return linux.Mmap(fd, offset, length, prot, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Munmap is a wrapper
|
||||||
|
func Munmap(b []byte) (err error) {
|
||||||
|
return linux.Munmap(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PerfEventAttr is a wrapper
|
||||||
|
type PerfEventAttr = linux.PerfEventAttr
|
||||||
|
|
||||||
|
// PerfEventOpen is a wrapper
|
||||||
|
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
|
||||||
|
return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Utsname is a wrapper
|
||||||
|
type Utsname = linux.Utsname
|
||||||
|
|
||||||
|
// Uname is a wrapper
|
||||||
|
func Uname(buf *Utsname) (err error) {
|
||||||
|
return linux.Uname(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Getpid is a wrapper
|
||||||
|
func Getpid() int {
|
||||||
|
return linux.Getpid()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gettid is a wrapper
|
||||||
|
func Gettid() int {
|
||||||
|
return linux.Gettid()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tgkill is a wrapper
|
||||||
|
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
|
||||||
|
return linux.Tgkill(tgid, tid, sig)
|
||||||
|
}
|
||||||
|
|
||||||
|
func KernelRelease() (string, error) {
|
||||||
|
var uname Utsname
|
||||||
|
err := Uname(&uname)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
end := bytes.IndexByte(uname.Release[:], 0)
|
||||||
|
release := string(uname.Release[:end])
|
||||||
|
return release, nil
|
||||||
|
}
|
|
@ -0,0 +1,228 @@
|
||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package unix
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ENOENT = syscall.ENOENT
|
||||||
|
EEXIST = syscall.EEXIST
|
||||||
|
EAGAIN = syscall.EAGAIN
|
||||||
|
ENOSPC = syscall.ENOSPC
|
||||||
|
EINVAL = syscall.EINVAL
|
||||||
|
EINTR = syscall.EINTR
|
||||||
|
EPERM = syscall.EPERM
|
||||||
|
ESRCH = syscall.ESRCH
|
||||||
|
ENODEV = syscall.ENODEV
|
||||||
|
EBADF = syscall.Errno(0)
|
||||||
|
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
|
||||||
|
ENOTSUPP = syscall.Errno(0x20c)
|
||||||
|
|
||||||
|
BPF_F_NO_PREALLOC = 0
|
||||||
|
BPF_F_NUMA_NODE = 0
|
||||||
|
BPF_F_RDONLY_PROG = 0
|
||||||
|
BPF_F_WRONLY_PROG = 0
|
||||||
|
BPF_OBJ_NAME_LEN = 0x10
|
||||||
|
BPF_TAG_SIZE = 0x8
|
||||||
|
SYS_BPF = 321
|
||||||
|
F_DUPFD_CLOEXEC = 0x406
|
||||||
|
EPOLLIN = 0x1
|
||||||
|
EPOLL_CTL_ADD = 0x1
|
||||||
|
EPOLL_CLOEXEC = 0x80000
|
||||||
|
O_CLOEXEC = 0x80000
|
||||||
|
O_NONBLOCK = 0x800
|
||||||
|
PROT_READ = 0x1
|
||||||
|
PROT_WRITE = 0x2
|
||||||
|
MAP_SHARED = 0x1
|
||||||
|
PERF_TYPE_SOFTWARE = 0x1
|
||||||
|
PERF_COUNT_SW_BPF_OUTPUT = 0xa
|
||||||
|
PerfBitWatermark = 0x4000
|
||||||
|
PERF_SAMPLE_RAW = 0x400
|
||||||
|
PERF_FLAG_FD_CLOEXEC = 0x8
|
||||||
|
RLIM_INFINITY = 0x7fffffffffffffff
|
||||||
|
RLIMIT_MEMLOCK = 8
|
||||||
|
BPF_STATS_RUN_TIME = 0
|
||||||
|
)
|
||||||
|
|
||||||
|
// Statfs_t is a wrapper
|
||||||
|
type Statfs_t struct {
|
||||||
|
Type int64
|
||||||
|
Bsize int64
|
||||||
|
Blocks uint64
|
||||||
|
Bfree uint64
|
||||||
|
Bavail uint64
|
||||||
|
Files uint64
|
||||||
|
Ffree uint64
|
||||||
|
Fsid [2]int32
|
||||||
|
Namelen int64
|
||||||
|
Frsize int64
|
||||||
|
Flags int64
|
||||||
|
Spare [4]int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rlimit is a wrapper
|
||||||
|
type Rlimit struct {
|
||||||
|
Cur uint64
|
||||||
|
Max uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setrlimit is a wrapper
|
||||||
|
func Setrlimit(resource int, rlim *Rlimit) (err error) {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Syscall is a wrapper
|
||||||
|
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
|
||||||
|
return 0, 0, syscall.Errno(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FcntlInt is a wrapper
|
||||||
|
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
|
||||||
|
return -1, errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Statfs is a wrapper
|
||||||
|
func Statfs(path string, buf *Statfs_t) error {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close is a wrapper
|
||||||
|
func Close(fd int) (err error) {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollEvent is a wrapper
|
||||||
|
type EpollEvent struct {
|
||||||
|
Events uint32
|
||||||
|
Fd int32
|
||||||
|
Pad int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollWait is a wrapper
|
||||||
|
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
|
||||||
|
return 0, errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollCtl is a wrapper
|
||||||
|
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Eventfd is a wrapper
|
||||||
|
func Eventfd(initval uint, flags int) (fd int, err error) {
|
||||||
|
return 0, errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write is a wrapper
|
||||||
|
func Write(fd int, p []byte) (n int, err error) {
|
||||||
|
return 0, errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollCreate1 is a wrapper
|
||||||
|
func EpollCreate1(flag int) (fd int, err error) {
|
||||||
|
return 0, errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// PerfEventMmapPage is a wrapper
|
||||||
|
type PerfEventMmapPage struct {
|
||||||
|
Version uint32
|
||||||
|
Compat_version uint32
|
||||||
|
Lock uint32
|
||||||
|
Index uint32
|
||||||
|
Offset int64
|
||||||
|
Time_enabled uint64
|
||||||
|
Time_running uint64
|
||||||
|
Capabilities uint64
|
||||||
|
Pmc_width uint16
|
||||||
|
Time_shift uint16
|
||||||
|
Time_mult uint32
|
||||||
|
Time_offset uint64
|
||||||
|
Time_zero uint64
|
||||||
|
Size uint32
|
||||||
|
|
||||||
|
Data_head uint64
|
||||||
|
Data_tail uint64
|
||||||
|
Data_offset uint64
|
||||||
|
Data_size uint64
|
||||||
|
Aux_head uint64
|
||||||
|
Aux_tail uint64
|
||||||
|
Aux_offset uint64
|
||||||
|
Aux_size uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNonblock is a wrapper
|
||||||
|
func SetNonblock(fd int, nonblocking bool) (err error) {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mmap is a wrapper
|
||||||
|
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||||
|
return []byte{}, errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Munmap is a wrapper
|
||||||
|
func Munmap(b []byte) (err error) {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// PerfEventAttr is a wrapper
|
||||||
|
type PerfEventAttr struct {
|
||||||
|
Type uint32
|
||||||
|
Size uint32
|
||||||
|
Config uint64
|
||||||
|
Sample uint64
|
||||||
|
Sample_type uint64
|
||||||
|
Read_format uint64
|
||||||
|
Bits uint64
|
||||||
|
Wakeup uint32
|
||||||
|
Bp_type uint32
|
||||||
|
Ext1 uint64
|
||||||
|
Ext2 uint64
|
||||||
|
Branch_sample_type uint64
|
||||||
|
Sample_regs_user uint64
|
||||||
|
Sample_stack_user uint32
|
||||||
|
Clockid int32
|
||||||
|
Sample_regs_intr uint64
|
||||||
|
Aux_watermark uint32
|
||||||
|
Sample_max_stack uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// PerfEventOpen is a wrapper
|
||||||
|
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
|
||||||
|
return 0, errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Utsname is a wrapper
|
||||||
|
type Utsname struct {
|
||||||
|
Release [65]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uname is a wrapper
|
||||||
|
func Uname(buf *Utsname) (err error) {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Getpid is a wrapper
|
||||||
|
func Getpid() int {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gettid is a wrapper
|
||||||
|
func Gettid() int {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tgkill is a wrapper
|
||||||
|
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
|
||||||
|
return errNonLinux
|
||||||
|
}
|
||||||
|
|
||||||
|
func KernelRelease() (string, error) {
|
||||||
|
return "", errNonLinux
|
||||||
|
}
|
|
@ -0,0 +1,169 @@
package link

import (
    "errors"
    "fmt"
    "os"

    "github.com/cilium/ebpf"
)

type cgroupAttachFlags uint32

// cgroup attach flags
const (
    flagAllowOverride cgroupAttachFlags = 1 << iota
    flagAllowMulti
    flagReplace
)

type CgroupOptions struct {
    // Path to a cgroupv2 folder.
    Path string
    // One of the AttachCgroup* constants
    Attach ebpf.AttachType
    // Program must be of type CGroup*, and the attach type must match Attach.
    Program *ebpf.Program
}

// AttachCgroup links a BPF program to a cgroup.
func AttachCgroup(opts CgroupOptions) (Link, error) {
    cgroup, err := os.Open(opts.Path)
    if err != nil {
        return nil, fmt.Errorf("can't open cgroup: %s", err)
    }

    clone, err := opts.Program.Clone()
    if err != nil {
        cgroup.Close()
        return nil, err
    }

    var cg Link
    cg, err = newLinkCgroup(cgroup, opts.Attach, clone)
    if errors.Is(err, ErrNotSupported) {
        cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti)
    }
    if errors.Is(err, ErrNotSupported) {
        cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride)
    }
    if err != nil {
        cgroup.Close()
        clone.Close()
        return nil, err
    }

    return cg, nil
}

// LoadPinnedCgroup loads a pinned cgroup from a bpffs.
func LoadPinnedCgroup(fileName string) (Link, error) {
    link, err := LoadPinnedRawLink(fileName)
    if err != nil {
        return nil, err
    }

    return &linkCgroup{link}, nil
}

type progAttachCgroup struct {
    cgroup     *os.File
    current    *ebpf.Program
    attachType ebpf.AttachType
    flags      cgroupAttachFlags
}

var _ Link = (*progAttachCgroup)(nil)

func (cg *progAttachCgroup) isLink() {}

func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
    if flags&flagAllowMulti > 0 {
        if err := haveProgAttachReplace(); err != nil {
            return nil, fmt.Errorf("can't support multiple programs: %w", err)
        }
    }

    err := RawAttachProgram(RawAttachProgramOptions{
        Target:  int(cgroup.Fd()),
        Program: prog,
        Flags:   uint32(flags),
        Attach:  attach,
    })
    if err != nil {
        return nil, fmt.Errorf("cgroup: %w", err)
    }

    return &progAttachCgroup{cgroup, prog, attach, flags}, nil
}

func (cg *progAttachCgroup) Close() error {
    defer cg.cgroup.Close()
    defer cg.current.Close()

    err := RawDetachProgram(RawDetachProgramOptions{
        Target:  int(cg.cgroup.Fd()),
        Program: cg.current,
        Attach:  cg.attachType,
    })
    if err != nil {
        return fmt.Errorf("close cgroup: %s", err)
    }
    return nil
}

func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
    new, err := prog.Clone()
    if err != nil {
        return err
    }

    args := RawAttachProgramOptions{
        Target:  int(cg.cgroup.Fd()),
        Program: prog,
        Attach:  cg.attachType,
        Flags:   uint32(cg.flags),
    }

    if cg.flags&flagAllowMulti > 0 {
        // Atomically replacing multiple programs requires at least
        // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
        // program in MULTI mode")
        args.Flags |= uint32(flagReplace)
        args.Replace = cg.current
    }

    if err := RawAttachProgram(args); err != nil {
        new.Close()
        return fmt.Errorf("can't update cgroup: %s", err)
    }

    cg.current.Close()
    cg.current = new
    return nil
}

func (cg *progAttachCgroup) Pin(string) error {
    return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
}

type linkCgroup struct {
    *RawLink
}

var _ Link = (*linkCgroup)(nil)

func (cg *linkCgroup) isLink() {}

func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
    link, err := AttachRawLink(RawLinkOptions{
        Target:  int(cgroup.Fd()),
        Program: prog,
        Attach:  attach,
    })
    if err != nil {
        return nil, err
    }

    return &linkCgroup{link}, err
}
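
// Illustrative sketch, not part of the upstream file: attaching a CGroupSKB
// program to a cgroupv2 directory. The path is an assumption; AttachCgroup
// prefers bpf_link and falls back to BPF_PROG_ATTACH on older kernels, and the
// returned Link should be Closed (or Pinned) by the caller.
func exampleAttachCgroup(prog *ebpf.Program) (Link, error) {
    return AttachCgroup(CgroupOptions{
        Path:    "/sys/fs/cgroup/unified/my.slice",
        Attach:  ebpf.AttachCGroupInetIngress,
        Program: prog,
    })
}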
@ -0,0 +1,2 @@
// Package link allows attaching eBPF programs to various kernel hooks.
package link
@ -0,0 +1,91 @@
|
||||||
|
package link
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf"
|
||||||
|
)
|
||||||
|
|
||||||
|
type IterOptions struct {
|
||||||
|
// Program must be of type Tracing with attach type
|
||||||
|
// AttachTraceIter. The kind of iterator to attach to is
|
||||||
|
// determined at load time via the AttachTo field.
|
||||||
|
//
|
||||||
|
// AttachTo requires the kernel to include BTF of itself,
|
||||||
|
// and it to be compiled with a recent pahole (>= 1.16).
|
||||||
|
Program *ebpf.Program
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttachIter attaches a BPF seq_file iterator.
|
||||||
|
func AttachIter(opts IterOptions) (*Iter, error) {
|
||||||
|
link, err := AttachRawLink(RawLinkOptions{
|
||||||
|
Program: opts.Program,
|
||||||
|
Attach: ebpf.AttachTraceIter,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't link iterator: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Iter{link}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadPinnedIter loads a pinned iterator from a bpffs.
|
||||||
|
func LoadPinnedIter(fileName string) (*Iter, error) {
|
||||||
|
link, err := LoadPinnedRawLink(fileName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Iter{link}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iter represents an attached bpf_iter.
|
||||||
|
type Iter struct {
|
||||||
|
link *RawLink
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ Link = (*Iter)(nil)
|
||||||
|
|
||||||
|
func (it *Iter) isLink() {}
|
||||||
|
|
||||||
|
// FD returns the underlying file descriptor.
|
||||||
|
func (it *Iter) FD() int {
|
||||||
|
return it.link.FD()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close implements Link.
|
||||||
|
func (it *Iter) Close() error {
|
||||||
|
return it.link.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pin implements Link.
|
||||||
|
func (it *Iter) Pin(fileName string) error {
|
||||||
|
return it.link.Pin(fileName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update implements Link.
|
||||||
|
func (it *Iter) Update(new *ebpf.Program) error {
|
||||||
|
return it.link.Update(new)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open creates a new instance of the iterator.
|
||||||
|
//
|
||||||
|
// Reading from the returned reader triggers the BPF program.
|
||||||
|
func (it *Iter) Open() (io.ReadCloser, error) {
|
||||||
|
linkFd, err := it.link.fd.Value()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
attr := &bpfIterCreateAttr{
|
||||||
|
linkFd: linkFd,
|
||||||
|
}
|
||||||
|
|
||||||
|
fd, err := bpfIterCreate(attr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't create iterator: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fd.File("bpf_iter"), nil
|
||||||
|
}
|
|
@ -0,0 +1,214 @@
|
||||||
|
package link
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf"
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ErrNotSupported = internal.ErrNotSupported
|
||||||
|
|
||||||
|
// Link represents a Program attached to a BPF hook.
|
||||||
|
type Link interface {
|
||||||
|
// Replace the current program with a new program.
|
||||||
|
//
|
||||||
|
// Passing a nil program is an error. May return an error wrapping ErrNotSupported.
|
||||||
|
Update(*ebpf.Program) error
|
||||||
|
|
||||||
|
// Persist a link by pinning it into a bpffs.
|
||||||
|
//
|
||||||
|
// May return an error wrapping ErrNotSupported.
|
||||||
|
Pin(string) error
|
||||||
|
|
||||||
|
// Close frees resources.
|
||||||
|
//
|
||||||
|
// The link will be broken unless it has been pinned. A link
|
||||||
|
// may continue past the lifetime of the process if Close is
|
||||||
|
// not called.
|
||||||
|
Close() error
|
||||||
|
|
||||||
|
// Prevent external users from implementing this interface.
|
||||||
|
isLink()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID uniquely identifies a BPF link.
|
||||||
|
type ID uint32
|
||||||
|
|
||||||
|
// RawLinkOptions control the creation of a raw link.
|
||||||
|
type RawLinkOptions struct {
|
||||||
|
// File descriptor to attach to. This differs for each attach type.
|
||||||
|
Target int
|
||||||
|
// Program to attach.
|
||||||
|
Program *ebpf.Program
|
||||||
|
// Attach must match the attach type of Program.
|
||||||
|
Attach ebpf.AttachType
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawLinkInfo contains metadata on a link.
|
||||||
|
type RawLinkInfo struct {
|
||||||
|
Type Type
|
||||||
|
ID ID
|
||||||
|
Program ebpf.ProgramID
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawLink is the low-level API to bpf_link.
|
||||||
|
//
|
||||||
|
// You should consider using the higher level interfaces in this
|
||||||
|
// package instead.
|
||||||
|
type RawLink struct {
|
||||||
|
fd *internal.FD
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttachRawLink creates a raw link.
|
||||||
|
func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
|
||||||
|
if err := haveBPFLink(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Target < 0 {
|
||||||
|
return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd)
|
||||||
|
}
|
||||||
|
|
||||||
|
progFd := opts.Program.FD()
|
||||||
|
if progFd < 0 {
|
||||||
|
return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr := bpfLinkCreateAttr{
|
||||||
|
targetFd: uint32(opts.Target),
|
||||||
|
progFd: uint32(progFd),
|
||||||
|
attachType: opts.Attach,
|
||||||
|
}
|
||||||
|
fd, err := bpfLinkCreate(&attr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't create link: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &RawLink{fd}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadPinnedRawLink loads a persisted link from a bpffs.
|
||||||
|
func LoadPinnedRawLink(fileName string) (*RawLink, error) {
|
||||||
|
return loadPinnedRawLink(fileName, UnspecifiedType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadPinnedRawLink(fileName string, typ Type) (*RawLink, error) {
|
||||||
|
fd, err := internal.BPFObjGet(fileName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("load pinned link: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
link := &RawLink{fd}
|
||||||
|
if typ == UnspecifiedType {
|
||||||
|
return link, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := link.Info()
|
||||||
|
if err != nil {
|
||||||
|
link.Close()
|
||||||
|
return nil, fmt.Errorf("get pinned link info: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.Type != typ {
|
||||||
|
link.Close()
|
||||||
|
return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
return link, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *RawLink) isLink() {}
|
||||||
|
|
||||||
|
// FD returns the raw file descriptor.
|
||||||
|
func (l *RawLink) FD() int {
|
||||||
|
fd, err := l.fd.Value()
|
||||||
|
if err != nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return int(fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close breaks the link.
|
||||||
|
//
|
||||||
|
// Use Pin if you want to make the link persistent.
|
||||||
|
func (l *RawLink) Close() error {
|
||||||
|
return l.fd.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pin persists a link past the lifetime of the process.
|
||||||
|
//
|
||||||
|
// Calling Close on a pinned Link will not break the link
|
||||||
|
// until the pin is removed.
|
||||||
|
func (l *RawLink) Pin(fileName string) error {
|
||||||
|
if err := internal.BPFObjPin(fileName, l.fd); err != nil {
|
||||||
|
return fmt.Errorf("can't pin link: %s", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update implements Link.
|
||||||
|
func (l *RawLink) Update(new *ebpf.Program) error {
|
||||||
|
return l.UpdateArgs(RawLinkUpdateOptions{
|
||||||
|
New: new,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs.
|
||||||
|
type RawLinkUpdateOptions struct {
|
||||||
|
New *ebpf.Program
|
||||||
|
Old *ebpf.Program
|
||||||
|
Flags uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateArgs updates a link based on args.
|
||||||
|
func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
|
||||||
|
newFd := opts.New.FD()
|
||||||
|
if newFd < 0 {
|
||||||
|
return fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
|
||||||
|
}
|
||||||
|
|
||||||
|
var oldFd int
|
||||||
|
if opts.Old != nil {
|
||||||
|
oldFd = opts.Old.FD()
|
||||||
|
if oldFd < 0 {
|
||||||
|
return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
linkFd, err := l.fd.Value()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("can't update link: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr := bpfLinkUpdateAttr{
|
||||||
|
linkFd: linkFd,
|
||||||
|
newProgFd: uint32(newFd),
|
||||||
|
oldProgFd: uint32(oldFd),
|
||||||
|
flags: opts.Flags,
|
||||||
|
}
|
||||||
|
return bpfLinkUpdate(&attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// struct bpf_link_info
|
||||||
|
type bpfLinkInfo struct {
|
||||||
|
typ uint32
|
||||||
|
id uint32
|
||||||
|
prog_id uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info returns metadata about the link.
|
||||||
|
func (l *RawLink) Info() (*RawLinkInfo, error) {
|
||||||
|
var info bpfLinkInfo
|
||||||
|
err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("link info: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &RawLinkInfo{
|
||||||
|
Type(info.typ),
|
||||||
|
ID(info.id),
|
||||||
|
ebpf.ProgramID(info.prog_id),
|
||||||
|
}, nil
|
||||||
|
}
|
|
@ -0,0 +1,60 @@
package link

import (
    "fmt"

    "github.com/cilium/ebpf"
)

// NetNsInfo contains metadata about a network namespace link.
type NetNsInfo struct {
    RawLinkInfo
}

// NetNsLink is a program attached to a network namespace.
type NetNsLink struct {
    *RawLink
}

// AttachNetNs attaches a program to a network namespace.
func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
    var attach ebpf.AttachType
    switch t := prog.Type(); t {
    case ebpf.FlowDissector:
        attach = ebpf.AttachFlowDissector
    case ebpf.SkLookup:
        attach = ebpf.AttachSkLookup
    default:
        return nil, fmt.Errorf("can't attach %v to network namespace", t)
    }

    link, err := AttachRawLink(RawLinkOptions{
        Target:  ns,
        Program: prog,
        Attach:  attach,
    })
    if err != nil {
        return nil, err
    }

    return &NetNsLink{link}, nil
}

// LoadPinnedNetNs loads a network namespace link from bpffs.
func LoadPinnedNetNs(fileName string) (*NetNsLink, error) {
    link, err := loadPinnedRawLink(fileName, NetNsType)
    if err != nil {
        return nil, err
    }

    return &NetNsLink{link}, nil
}

// Info returns information about the link.
func (nns *NetNsLink) Info() (*NetNsInfo, error) {
    info, err := nns.RawLink.Info()
    if err != nil {
        return nil, err
    }
    return &NetNsInfo{*info}, nil
}
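
// Illustrative sketch, not part of the upstream file (assumes an "os" import):
// attaching a flow dissector program to the current network namespace via its
// /proc handle. The path is an assumption for the example.
func exampleAttachNetNs(prog *ebpf.Program) (*NetNsLink, error) {
    ns, err := os.Open("/proc/self/ns/net")
    if err != nil {
        return nil, err
    }
    defer ns.Close()

    // The kernel keeps its own reference once the link is created, so the
    // namespace file can be closed afterwards.
    return AttachNetNs(int(ns.Fd()), prog)
}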
@ -0,0 +1,76 @@
package link

import (
    "fmt"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/internal"
)

type RawAttachProgramOptions struct {
    // File descriptor to attach to. This differs for each attach type.
    Target int
    // Program to attach.
    Program *ebpf.Program
    // Program to replace (cgroups).
    Replace *ebpf.Program
    // Attach must match the attach type of Program (and Replace).
    Attach ebpf.AttachType
    // Flags control the attach behaviour. This differs for each attach type.
    Flags uint32
}

// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
//
// You should use one of the higher level abstractions available in this
// package if possible.
func RawAttachProgram(opts RawAttachProgramOptions) error {
    if err := haveProgAttach(); err != nil {
        return err
    }

    var replaceFd uint32
    if opts.Replace != nil {
        replaceFd = uint32(opts.Replace.FD())
    }

    attr := internal.BPFProgAttachAttr{
        TargetFd:     uint32(opts.Target),
        AttachBpfFd:  uint32(opts.Program.FD()),
        ReplaceBpfFd: replaceFd,
        AttachType:   uint32(opts.Attach),
        AttachFlags:  uint32(opts.Flags),
    }

    if err := internal.BPFProgAttach(&attr); err != nil {
        return fmt.Errorf("can't attach program: %s", err)
    }
    return nil
}

type RawDetachProgramOptions struct {
    Target  int
    Program *ebpf.Program
    Attach  ebpf.AttachType
}

// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
//
// You should use one of the higher level abstractions available in this
// package if possible.
func RawDetachProgram(opts RawDetachProgramOptions) error {
    if err := haveProgAttach(); err != nil {
        return err
    }

    attr := internal.BPFProgDetachAttr{
        TargetFd:    uint32(opts.Target),
        AttachBpfFd: uint32(opts.Program.FD()),
        AttachType:  uint32(opts.Attach),
    }
    if err := internal.BPFProgDetach(&attr); err != nil {
        return fmt.Errorf("can't detach program: %s", err)
    }

    return nil
}
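
// Illustrative sketch, not part of the upstream file: the low-level attach
// call that the cgroup link types earlier in this package build on. Target is
// an already-open cgroup directory file descriptor (an assumption here).
func exampleRawAttach(cgroupFd int, prog *ebpf.Program) error {
    return RawAttachProgram(RawAttachProgramOptions{
        Target:  cgroupFd,
        Program: prog,
        Attach:  ebpf.AttachCGroupInetIngress,
        Flags:   uint32(flagAllowMulti),
    })
}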
@ -0,0 +1,57 @@
package link

import (
    "fmt"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/internal"
)

type RawTracepointOptions struct {
    // Tracepoint name.
    Name string
    // Program must be of type RawTracepoint*
    Program *ebpf.Program
}

// AttachRawTracepoint links a BPF program to a raw_tracepoint.
//
// Requires at least Linux 4.17.
func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) {
    if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable {
        return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t)
    }
    if opts.Program.FD() < 0 {
        return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
    }

    fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{
        name: internal.NewStringPointer(opts.Name),
        fd:   uint32(opts.Program.FD()),
    })
    if err != nil {
        return nil, err
    }

    return &progAttachRawTracepoint{fd: fd}, nil
}

type progAttachRawTracepoint struct {
    fd *internal.FD
}

var _ Link = (*progAttachRawTracepoint)(nil)

func (rt *progAttachRawTracepoint) isLink() {}

func (rt *progAttachRawTracepoint) Close() error {
    return rt.fd.Close()
}

func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error {
    return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported)
}

func (rt *progAttachRawTracepoint) Pin(_ string) error {
    return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported)
}
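
// Illustrative sketch, not part of the upstream file: linking a RawTracepoint
// program to a named tracepoint. "sys_enter" is an assumption for the example;
// the program must have been loaded with type RawTracepoint(Writable).
func exampleAttachRawTracepoint(prog *ebpf.Program) (Link, error) {
    return AttachRawTracepoint(RawTracepointOptions{
        Name:    "sys_enter",
        Program: prog,
    })
}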
@ -0,0 +1,173 @@
|
||||||
|
package link
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf"
|
||||||
|
"github.com/cilium/ebpf/asm"
|
||||||
|
"github.com/cilium/ebpf/internal"
|
||||||
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Type is the kind of link.
|
||||||
|
type Type uint32
|
||||||
|
|
||||||
|
// Valid link types.
|
||||||
|
//
|
||||||
|
// Equivalent to enum bpf_link_type.
|
||||||
|
const (
|
||||||
|
UnspecifiedType Type = iota
|
||||||
|
RawTracepointType
|
||||||
|
TracingType
|
||||||
|
CgroupType
|
||||||
|
IterType
|
||||||
|
NetNsType
|
||||||
|
XDPType
|
||||||
|
)
|
||||||
|
|
||||||
|
var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
|
||||||
|
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
|
||||||
|
Type: ebpf.CGroupSKB,
|
||||||
|
AttachType: ebpf.AttachCGroupInetIngress,
|
||||||
|
License: "MIT",
|
||||||
|
Instructions: asm.Instructions{
|
||||||
|
asm.Mov.Imm(asm.R0, 0),
|
||||||
|
asm.Return(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return internal.ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
// BPF_PROG_ATTACH was introduced at the same time as CGgroupSKB,
|
||||||
|
// so being able to load the program is enough to infer that we
|
||||||
|
// have the syscall.
|
||||||
|
prog.Close()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error {
|
||||||
|
if err := haveProgAttach(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
|
||||||
|
Type: ebpf.CGroupSKB,
|
||||||
|
AttachType: ebpf.AttachCGroupInetIngress,
|
||||||
|
License: "MIT",
|
||||||
|
Instructions: asm.Instructions{
|
||||||
|
asm.Mov.Imm(asm.R0, 0),
|
||||||
|
asm.Return(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return internal.ErrNotSupported
|
||||||
|
}
|
||||||
|
defer prog.Close()
|
||||||
|
|
||||||
|
// We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs.
|
||||||
|
// If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't
|
||||||
|
// present.
|
||||||
|
attr := internal.BPFProgAttachAttr{
|
||||||
|
// We rely on this being checked after attachFlags.
|
||||||
|
TargetFd: ^uint32(0),
|
||||||
|
AttachBpfFd: uint32(prog.FD()),
|
||||||
|
AttachType: uint32(ebpf.AttachCGroupInetIngress),
|
||||||
|
AttachFlags: uint32(flagReplace),
|
||||||
|
}
|
||||||
|
|
||||||
|
err = internal.BPFProgAttach(&attr)
|
||||||
|
if errors.Is(err, unix.EINVAL) {
|
||||||
|
return internal.ErrNotSupported
|
||||||
|
}
|
||||||
|
if errors.Is(err, unix.EBADF) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
|
||||||
|
type bpfLinkCreateAttr struct {
|
||||||
|
progFd uint32
|
||||||
|
targetFd uint32
|
||||||
|
attachType ebpf.AttachType
|
||||||
|
flags uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) {
|
||||||
|
ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return internal.NewFD(uint32(ptr)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type bpfLinkUpdateAttr struct {
|
||||||
|
linkFd uint32
|
||||||
|
newProgFd uint32
|
||||||
|
flags uint32
|
||||||
|
oldProgFd uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error {
|
||||||
|
_, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
|
||||||
|
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
|
||||||
|
Type: ebpf.CGroupSKB,
|
||||||
|
AttachType: ebpf.AttachCGroupInetIngress,
|
||||||
|
License: "MIT",
|
||||||
|
Instructions: asm.Instructions{
|
||||||
|
asm.Mov.Imm(asm.R0, 0),
|
||||||
|
asm.Return(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return internal.ErrNotSupported
|
||||||
|
}
|
||||||
|
defer prog.Close()
|
||||||
|
|
||||||
|
attr := bpfLinkCreateAttr{
|
||||||
|
// This is a hopefully invalid file descriptor, which triggers EBADF.
|
||||||
|
targetFd: ^uint32(0),
|
||||||
|
progFd: uint32(prog.FD()),
|
||||||
|
attachType: ebpf.AttachCGroupInetIngress,
|
||||||
|
}
|
||||||
|
_, err = bpfLinkCreate(&attr)
|
||||||
|
if errors.Is(err, unix.EINVAL) {
|
||||||
|
return internal.ErrNotSupported
|
||||||
|
}
|
||||||
|
if errors.Is(err, unix.EBADF) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
|
||||||
|
type bpfIterCreateAttr struct {
|
||||||
|
linkFd uint32
|
||||||
|
flags uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) {
|
||||||
|
ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
|
if err == nil {
|
||||||
|
return internal.NewFD(uint32(ptr)), nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type bpfRawTracepointOpenAttr struct {
|
||||||
|
name internal.Pointer
|
||||||
|
fd uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) {
|
||||||
|
ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
|
if err == nil {
|
||||||
|
return internal.NewFD(uint32(ptr)), nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
|
@ -0,0 +1,133 @@
|
||||||
|
package ebpf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/cilium/ebpf/asm"
|
||||||
|
"github.com/cilium/ebpf/internal/btf"
|
||||||
|
)
|
||||||
|
|
||||||
|
// link resolves bpf-to-bpf calls.
|
||||||
|
//
|
||||||
|
// Each library may contain multiple functions / labels, and is only linked
|
||||||
|
// if prog references one of these functions.
|
||||||
|
//
|
||||||
|
// Libraries also linked.
|
||||||
|
func link(prog *ProgramSpec, libs []*ProgramSpec) error {
|
||||||
|
var (
|
||||||
|
linked = make(map[*ProgramSpec]bool)
|
||||||
|
pending = []asm.Instructions{prog.Instructions}
|
||||||
|
insns asm.Instructions
|
||||||
|
)
|
||||||
|
for len(pending) > 0 {
|
||||||
|
insns, pending = pending[0], pending[1:]
|
||||||
|
for _, lib := range libs {
|
||||||
|
if linked[lib] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
needed, err := needSection(insns, lib.Instructions)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("linking %s: %w", lib.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !needed {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
linked[lib] = true
|
||||||
|
prog.Instructions = append(prog.Instructions, lib.Instructions...)
|
||||||
|
pending = append(pending, lib.Instructions)
|
||||||
|
|
||||||
|
if prog.BTF != nil && lib.BTF != nil {
|
||||||
|
if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil {
|
||||||
|
return fmt.Errorf("linking BTF of %s: %w", lib.Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func needSection(insns, section asm.Instructions) (bool, error) {
|
||||||
|
// A map of symbols to the libraries which contain them.
|
||||||
|
symbols, err := section.SymbolOffsets()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ins := range insns {
|
||||||
|
if ins.Reference == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ins.Constant != -1 {
|
||||||
|
// This is already a valid call, no need to link again.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := symbols[ins.Reference]; !ok {
|
||||||
|
// Symbol isn't available in this section
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point we know that at least one function in the
|
||||||
|
// library is called from insns, so we have to link it.
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// None of the functions in the section are called.
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func fixupJumpsAndCalls(insns asm.Instructions) error {
|
||||||
|
symbolOffsets := make(map[string]asm.RawInstructionOffset)
|
||||||
|
iter := insns.Iterate()
|
||||||
|
for iter.Next() {
|
||||||
|
ins := iter.Ins
|
||||||
|
|
||||||
|
if ins.Symbol == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := symbolOffsets[ins.Symbol]; ok {
|
||||||
|
return fmt.Errorf("duplicate symbol %s", ins.Symbol)
|
||||||
|
}
|
||||||
|
|
||||||
|
symbolOffsets[ins.Symbol] = iter.Offset
|
||||||
|
}
|
||||||
|
|
||||||
|
iter = insns.Iterate()
|
||||||
|
for iter.Next() {
|
||||||
|
i := iter.Index
|
||||||
|
offset := iter.Offset
|
||||||
|
ins := iter.Ins
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case ins.IsFunctionCall() && ins.Constant == -1:
|
||||||
|
// Rewrite bpf to bpf call
|
||||||
|
callOffset, ok := symbolOffsets[ins.Reference]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Constant = int64(callOffset - offset - 1)
|
||||||
|
|
||||||
|
case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1:
|
||||||
|
// Rewrite jump to label
|
||||||
|
jumpOffset, ok := symbolOffsets[ins.Reference]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Offset = int16(jumpOffset - offset - 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
File diff suppressed because it is too large
|
@@ -0,0 +1,216 @@
package ebpf

import (
    "bytes"
    "encoding"
    "encoding/binary"
    "errors"
    "fmt"
    "reflect"
    "runtime"
    "unsafe"

    "github.com/cilium/ebpf/internal"
)

// marshalPtr converts an arbitrary value into a pointer suitable
// to be passed to the kernel.
//
// As an optimization, it returns the original value if it is an
// unsafe.Pointer.
func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
    if ptr, ok := data.(unsafe.Pointer); ok {
        return internal.NewPointer(ptr), nil
    }

    buf, err := marshalBytes(data, length)
    if err != nil {
        return internal.Pointer{}, err
    }

    return internal.NewSlicePointer(buf), nil
}

// marshalBytes converts an arbitrary value into a byte buffer.
//
// Prefer using Map.marshalKey and Map.marshalValue if possible, since
// those have special cases that allow more types to be encoded.
//
// Returns an error if the given value isn't representable in exactly
// length bytes.
func marshalBytes(data interface{}, length int) (buf []byte, err error) {
    switch value := data.(type) {
    case encoding.BinaryMarshaler:
        buf, err = value.MarshalBinary()
    case string:
        buf = []byte(value)
    case []byte:
        buf = value
    case unsafe.Pointer:
        err = errors.New("can't marshal from unsafe.Pointer")
    case Map, *Map, Program, *Program:
        err = fmt.Errorf("can't marshal %T", value)
    default:
        var wr bytes.Buffer
        err = binary.Write(&wr, internal.NativeEndian, value)
        if err != nil {
            err = fmt.Errorf("encoding %T: %v", value, err)
        }
        buf = wr.Bytes()
    }
    if err != nil {
        return nil, err
    }

    if len(buf) != length {
        return nil, fmt.Errorf("%T doesn't marshal to %d bytes", data, length)
    }
    return buf, nil
}

func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) {
    if ptr, ok := dst.(unsafe.Pointer); ok {
        return internal.NewPointer(ptr), nil
    }

    buf := make([]byte, length)
    return internal.NewSlicePointer(buf), buf
}

// unmarshalBytes converts a byte buffer into an arbitrary value.
//
// Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since
// those have special cases that allow more types to be encoded.
func unmarshalBytes(data interface{}, buf []byte) error {
    switch value := data.(type) {
    case unsafe.Pointer:
        sh := &reflect.SliceHeader{
            Data: uintptr(value),
            Len:  len(buf),
            Cap:  len(buf),
        }

        dst := *(*[]byte)(unsafe.Pointer(sh))
        copy(dst, buf)
        runtime.KeepAlive(value)
        return nil
    case Map, *Map, Program, *Program:
        return fmt.Errorf("can't unmarshal into %T", value)
    case encoding.BinaryUnmarshaler:
        return value.UnmarshalBinary(buf)
    case *string:
        *value = string(buf)
        return nil
    case *[]byte:
        *value = buf
        return nil
    case string:
        return errors.New("require pointer to string")
    case []byte:
        return errors.New("require pointer to []byte")
    default:
        rd := bytes.NewReader(buf)
        if err := binary.Read(rd, internal.NativeEndian, value); err != nil {
            return fmt.Errorf("decoding %T: %v", value, err)
        }
        return nil
    }
}

// marshalPerCPUValue encodes a slice containing one value per
// possible CPU into a buffer of bytes.
//
// Values are initialized to zero if the slice has fewer elements than CPUs.
//
// slice must have a type like []elementType.
func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) {
    sliceType := reflect.TypeOf(slice)
    if sliceType.Kind() != reflect.Slice {
        return internal.Pointer{}, errors.New("per-CPU value requires slice")
    }

    possibleCPUs, err := internal.PossibleCPUs()
    if err != nil {
        return internal.Pointer{}, err
    }

    sliceValue := reflect.ValueOf(slice)
    sliceLen := sliceValue.Len()
    if sliceLen > possibleCPUs {
        return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
    }

    alignedElemLength := align(elemLength, 8)
    buf := make([]byte, alignedElemLength*possibleCPUs)

    for i := 0; i < sliceLen; i++ {
        elem := sliceValue.Index(i).Interface()
        elemBytes, err := marshalBytes(elem, elemLength)
        if err != nil {
            return internal.Pointer{}, err
        }

        offset := i * alignedElemLength
        copy(buf[offset:offset+elemLength], elemBytes)
    }

    return internal.NewSlicePointer(buf), nil
}

// unmarshalPerCPUValue decodes a buffer into a slice containing one value per
// possible CPU.
//
// valueOut must have a type like *[]elementType
func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error {
    slicePtrType := reflect.TypeOf(slicePtr)
    if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
        return fmt.Errorf("per-cpu value requires pointer to slice")
    }

    possibleCPUs, err := internal.PossibleCPUs()
    if err != nil {
        return err
    }

    sliceType := slicePtrType.Elem()
    slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)

    sliceElemType := sliceType.Elem()
    sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
    if sliceElemIsPointer {
        sliceElemType = sliceElemType.Elem()
    }

    step := len(buf) / possibleCPUs
    if step < elemLength {
        return fmt.Errorf("per-cpu element length is larger than available data")
    }
    for i := 0; i < possibleCPUs; i++ {
        var elem interface{}
        if sliceElemIsPointer {
            newElem := reflect.New(sliceElemType)
            slice.Index(i).Set(newElem)
            elem = newElem.Interface()
        } else {
            elem = slice.Index(i).Addr().Interface()
        }

        // Make a copy, since unmarshal can hold on to itemBytes
        elemBytes := make([]byte, elemLength)
        copy(elemBytes, buf[:elemLength])

        err := unmarshalBytes(elem, elemBytes)
        if err != nil {
            return fmt.Errorf("cpu %d: %w", i, err)
        }

        buf = buf[step:]
    }

    reflect.ValueOf(slicePtr).Elem().Set(slice)
    return nil
}

func align(n, alignment int) int {
    return (int(n) + alignment - 1) / alignment * alignment
}
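To make the per-CPU layout concrete: each element is padded to align(elemLength, 8) bytes, and the kernel buffer holds one such slot per possible CPU, with slots beyond the supplied slice left zeroed. A self-contained sketch (the CPU count of 4 is just an example) of that arithmetic:

package main

import "fmt"

// align mirrors the helper above: round n up to a multiple of alignment.
func align(n, alignment int) int {
    return (n + alignment - 1) / alignment * alignment
}

func main() {
    // A 4-byte per-CPU element is padded to 8 bytes per possible CPU,
    // so on a machine with 4 possible CPUs the kernel buffer is 32 bytes.
    elemLength, possibleCPUs := 4, 4
    fmt.Println(align(elemLength, 8) * possibleCPUs) // 32
}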
@@ -0,0 +1,42 @@
package ebpf

import (
    "errors"
    "fmt"
    "os"

    "github.com/cilium/ebpf/internal"
)

func pin(currentPath, newPath string, fd *internal.FD) error {
    if newPath == "" {
        return errors.New("given pinning path cannot be empty")
    }
    if currentPath == "" {
        return internal.BPFObjPin(newPath, fd)
    }
    if currentPath == newPath {
        return nil
    }
    var err error
    // Object is now moved to the new pinning path.
    if err = os.Rename(currentPath, newPath); err == nil {
        return nil
    }
    if !os.IsNotExist(err) {
        return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
    }
    // Internal state not in sync with the file system so let's fix it.
    return internal.BPFObjPin(newPath, fd)
}

func unpin(pinnedPath string) error {
    if pinnedPath == "" {
        return nil
    }
    err := os.Remove(pinnedPath)
    if err == nil || os.IsNotExist(err) {
        return nil
    }
    return err
}
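These helpers back Program.Pin and Program.Unpin further down in this diff. A minimal caller-side sketch, assuming a bpffs is mounted and using a hypothetical pin path:

// Sketch only: pin a loaded program so it outlives this process.
// "/sys/fs/bpf/my_prog" is an illustrative path on a mounted bpffs.
if err := prog.Pin("/sys/fs/bpf/my_prog"); err != nil {
    log.Fatalf("pin program: %v", err)
}
defer prog.Unpin() // unpinning an unpinned program is a no-op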
@@ -0,0 +1,698 @@
package ebpf

import (
    "bytes"
    "encoding/binary"
    "errors"
    "fmt"
    "math"
    "path/filepath"
    "strings"
    "time"

    "github.com/cilium/ebpf/asm"
    "github.com/cilium/ebpf/internal"
    "github.com/cilium/ebpf/internal/btf"
    "github.com/cilium/ebpf/internal/unix"
)

// ErrNotSupported is returned whenever the kernel doesn't support a feature.
var ErrNotSupported = internal.ErrNotSupported

// ProgramID represents the unique ID of an eBPF program.
type ProgramID uint32

const (
    // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN.
    // This is currently the maximum of spare space allocated for SKB
    // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN.
    outputPad = 256 + 2
)

// DefaultVerifierLogSize is the default number of bytes allocated for the
// verifier log.
const DefaultVerifierLogSize = 64 * 1024

// ProgramOptions control loading a program into the kernel.
type ProgramOptions struct {
    // Controls the detail emitted by the kernel verifier. Set to non-zero
    // to enable logging.
    LogLevel uint32
    // Controls the output buffer size for the verifier. Defaults to
    // DefaultVerifierLogSize.
    LogSize int
}

// ProgramSpec defines a Program.
type ProgramSpec struct {
    // Name is passed to the kernel as a debug aid. Must only contain
    // alpha numeric and '_' characters.
    Name string
    // Type determines at which hook in the kernel a program will run.
    Type       ProgramType
    AttachType AttachType
    // Name of a kernel data structure to attach to. Its interpretation
    // depends on Type and AttachType.
    AttachTo     string
    Instructions asm.Instructions

    // License of the program. Some helpers are only available if
    // the license is deemed compatible with the GPL.
    //
    // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1
    License string

    // Version used by tracing programs.
    //
    // Deprecated: superseded by BTF.
    KernelVersion uint32

    // The BTF associated with this program. Changing Instructions
    // will most likely invalidate the contained data, and may
    // result in errors when attempting to load it into the kernel.
    BTF *btf.Program

    // The byte order this program was compiled for, may be nil.
    ByteOrder binary.ByteOrder
}

// Copy returns a copy of the spec.
func (ps *ProgramSpec) Copy() *ProgramSpec {
    if ps == nil {
        return nil
    }

    cpy := *ps
    cpy.Instructions = make(asm.Instructions, len(ps.Instructions))
    copy(cpy.Instructions, ps.Instructions)
    return &cpy
}

// Tag calculates the kernel tag for a series of instructions.
//
// Use asm.Instructions.Tag if you need to calculate for non-native endianness.
func (ps *ProgramSpec) Tag() (string, error) {
    return ps.Instructions.Tag(internal.NativeEndian)
}

// Program represents BPF program loaded into the kernel.
//
// It is not safe to close a Program which is used by other goroutines.
type Program struct {
    // Contains the output of the kernel verifier if enabled,
    // otherwise it is empty.
    VerifierLog string

    fd         *internal.FD
    name       string
    pinnedPath string
    typ        ProgramType
}

// NewProgram creates a new Program.
//
// Loading a program for the first time will perform
// feature detection by loading small, temporary programs.
func NewProgram(spec *ProgramSpec) (*Program, error) {
    return NewProgramWithOptions(spec, ProgramOptions{})
}

// NewProgramWithOptions creates a new Program.
//
// Loading a program for the first time will perform
// feature detection by loading small, temporary programs.
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
    btfs := make(btfHandleCache)
    defer btfs.close()

    return newProgramWithOptions(spec, opts, btfs)
}

func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandleCache) (*Program, error) {
    if len(spec.Instructions) == 0 {
        return nil, errors.New("Instructions cannot be empty")
    }

    if len(spec.License) == 0 {
        return nil, errors.New("License cannot be empty")
    }

    if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
        return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
    }

    insns := make(asm.Instructions, len(spec.Instructions))
    copy(insns, spec.Instructions)

    if err := fixupJumpsAndCalls(insns); err != nil {
        return nil, err
    }

    buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
    err := insns.Marshal(buf, internal.NativeEndian)
    if err != nil {
        return nil, err
    }

    bytecode := buf.Bytes()
    insCount := uint32(len(bytecode) / asm.InstructionSize)
    attr := &bpfProgLoadAttr{
        progType:           spec.Type,
        expectedAttachType: spec.AttachType,
        insCount:           insCount,
        instructions:       internal.NewSlicePointer(bytecode),
        license:            internal.NewStringPointer(spec.License),
        kernelVersion:      spec.KernelVersion,
    }

    if haveObjName() == nil {
        attr.progName = newBPFObjName(spec.Name)
    }

    var btfDisabled bool
    if spec.BTF != nil {
        if relos, err := btf.ProgramRelocations(spec.BTF, nil); err != nil {
            return nil, fmt.Errorf("CO-RE relocations: %s", err)
        } else if len(relos) > 0 {
            return nil, fmt.Errorf("applying CO-RE relocations: %w", ErrNotSupported)
        }

        handle, err := btfs.load(btf.ProgramSpec(spec.BTF))
        btfDisabled = errors.Is(err, btf.ErrNotSupported)
        if err != nil && !btfDisabled {
            return nil, fmt.Errorf("load BTF: %w", err)
        }

        if handle != nil {
            attr.progBTFFd = uint32(handle.FD())

            recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
            if err != nil {
                return nil, fmt.Errorf("get BTF line infos: %w", err)
            }
            attr.lineInfoRecSize = recSize
            attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
            attr.lineInfo = internal.NewSlicePointer(bytes)

            recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
            if err != nil {
                return nil, fmt.Errorf("get BTF function infos: %w", err)
            }
            attr.funcInfoRecSize = recSize
            attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
            attr.funcInfo = internal.NewSlicePointer(bytes)
        }
    }

    if spec.AttachTo != "" {
        target, err := resolveBTFType(spec.AttachTo, spec.Type, spec.AttachType)
        if err != nil {
            return nil, err
        }
        if target != nil {
            attr.attachBTFID = target.ID()
        }
    }

    logSize := DefaultVerifierLogSize
    if opts.LogSize > 0 {
        logSize = opts.LogSize
    }

    var logBuf []byte
    if opts.LogLevel > 0 {
        logBuf = make([]byte, logSize)
        attr.logLevel = opts.LogLevel
        attr.logSize = uint32(len(logBuf))
        attr.logBuf = internal.NewSlicePointer(logBuf)
    }

    fd, err := bpfProgLoad(attr)
    if err == nil {
        return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil
    }

    logErr := err
    if opts.LogLevel == 0 {
        // Re-run with the verifier enabled to get better error messages.
        logBuf = make([]byte, logSize)
        attr.logLevel = 1
        attr.logSize = uint32(len(logBuf))
        attr.logBuf = internal.NewSlicePointer(logBuf)

        _, logErr = bpfProgLoad(attr)
    }

    if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 {
        // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
        // check that the log is empty to reduce false positives.
        return nil, fmt.Errorf("load program: RLIMIT_MEMLOCK may be too low: %w", logErr)
    }

    err = internal.ErrorWithLog(err, logBuf, logErr)
    if btfDisabled {
        return nil, fmt.Errorf("load program without BTF: %w", err)
    }
    return nil, fmt.Errorf("load program: %w", err)
}

// NewProgramFromFD creates a program from a raw fd.
//
// You should not use fd after calling this function.
//
// Requires at least Linux 4.10.
func NewProgramFromFD(fd int) (*Program, error) {
    if fd < 0 {
        return nil, errors.New("invalid fd")
    }

    return newProgramFromFD(internal.NewFD(uint32(fd)))
}

// NewProgramFromID returns the program for a given id.
//
// Returns ErrNotExist, if there is no eBPF program with the given id.
func NewProgramFromID(id ProgramID) (*Program, error) {
    fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id))
    if err != nil {
        return nil, fmt.Errorf("get program by id: %w", err)
    }

    return newProgramFromFD(fd)
}

func newProgramFromFD(fd *internal.FD) (*Program, error) {
    info, err := newProgramInfoFromFd(fd)
    if err != nil {
        fd.Close()
        return nil, fmt.Errorf("discover program type: %w", err)
    }

    return &Program{"", fd, "", "", info.Type}, nil
}

func (p *Program) String() string {
    if p.name != "" {
        return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd)
    }
    return fmt.Sprintf("%s(%v)", p.typ, p.fd)
}

// Type returns the underlying type of the program.
func (p *Program) Type() ProgramType {
    return p.typ
}

// Info returns metadata about the program.
//
// Requires at least 4.10.
func (p *Program) Info() (*ProgramInfo, error) {
    return newProgramInfoFromFd(p.fd)
}

// FD gets the file descriptor of the Program.
//
// It is invalid to call this function after Close has been called.
func (p *Program) FD() int {
    fd, err := p.fd.Value()
    if err != nil {
        // Best effort: -1 is the number most likely to be an
        // invalid file descriptor.
        return -1
    }

    return int(fd)
}

// Clone creates a duplicate of the Program.
//
// Closing the duplicate does not affect the original, and vice versa.
//
// Cloning a nil Program returns nil.
func (p *Program) Clone() (*Program, error) {
    if p == nil {
        return nil, nil
    }

    dup, err := p.fd.Dup()
    if err != nil {
        return nil, fmt.Errorf("can't clone program: %w", err)
    }

    return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil
}

// Pin persists the Program on the BPF virtual file system past the lifetime of
// the process that created it
//
// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
func (p *Program) Pin(fileName string) error {
    if err := pin(p.pinnedPath, fileName, p.fd); err != nil {
        return err
    }
    p.pinnedPath = fileName
    return nil
}

// Unpin removes the persisted state for the Program from the BPF virtual filesystem.
//
// Failed calls to Unpin will not alter the state returned by IsPinned.
//
// Unpinning an unpinned Program returns nil.
func (p *Program) Unpin() error {
    if err := unpin(p.pinnedPath); err != nil {
        return err
    }
    p.pinnedPath = ""
    return nil
}

// IsPinned returns true if the Program has a non-empty pinned path.
func (p *Program) IsPinned() bool {
    if p.pinnedPath == "" {
        return false
    }
    return true
}

// Close unloads the program from the kernel.
func (p *Program) Close() error {
    if p == nil {
        return nil
    }

    return p.fd.Close()
}

// Test runs the Program in the kernel with the given input and returns the
// value returned by the eBPF program. outLen may be zero.
//
// Note: the kernel expects at least 14 bytes input for an ethernet header for
// XDP and SKB programs.
//
// This function requires at least Linux 4.12.
func (p *Program) Test(in []byte) (uint32, []byte, error) {
    ret, out, _, err := p.testRun(in, 1, nil)
    if err != nil {
        return ret, nil, fmt.Errorf("can't test program: %w", err)
    }
    return ret, out, nil
}

// Benchmark runs the Program with the given input for a number of times
// and returns the time taken per iteration.
//
// Returns the result of the last execution of the program and the time per
// run or an error. reset is called whenever the benchmark syscall is
// interrupted, and should be set to testing.B.ResetTimer or similar.
//
// Note: profiling a call to this function will skew its results, see
// https://github.com/cilium/ebpf/issues/24
//
// This function requires at least Linux 4.12.
func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
    ret, _, total, err := p.testRun(in, repeat, reset)
    if err != nil {
        return ret, total, fmt.Errorf("can't benchmark program: %w", err)
    }
    return ret, total, nil
}

var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error {
    prog, err := NewProgram(&ProgramSpec{
        Type: SocketFilter,
        Instructions: asm.Instructions{
            asm.LoadImm(asm.R0, 0, asm.DWord),
            asm.Return(),
        },
        License: "MIT",
    })
    if err != nil {
        // This may be because we lack sufficient permissions, etc.
        return err
    }
    defer prog.Close()

    // Programs require at least 14 bytes input
    in := make([]byte, 14)
    attr := bpfProgTestRunAttr{
        fd:         uint32(prog.FD()),
        dataSizeIn: uint32(len(in)),
        dataIn:     internal.NewSlicePointer(in),
    }

    err = bpfProgTestRun(&attr)
    if errors.Is(err, unix.EINVAL) {
        // Check for EINVAL specifically, rather than err != nil since we
        // otherwise misdetect due to insufficient permissions.
        return internal.ErrNotSupported
    }
    if errors.Is(err, unix.EINTR) {
        // We know that PROG_TEST_RUN is supported if we get EINTR.
        return nil
    }
    return err
})

func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) {
    if uint(repeat) > math.MaxUint32 {
        return 0, nil, 0, fmt.Errorf("repeat is too high")
    }

    if len(in) == 0 {
        return 0, nil, 0, fmt.Errorf("missing input")
    }

    if uint(len(in)) > math.MaxUint32 {
        return 0, nil, 0, fmt.Errorf("input is too long")
    }

    if err := haveProgTestRun(); err != nil {
        return 0, nil, 0, err
    }

    // Older kernels ignore the dataSizeOut argument when copying to user space.
    // Combined with things like bpf_xdp_adjust_head() we don't really know what the final
    // size will be. Hence we allocate an output buffer which we hope will always be large
    // enough, and panic if the kernel wrote past the end of the allocation.
    // See https://patchwork.ozlabs.org/cover/1006822/
    out := make([]byte, len(in)+outputPad)

    fd, err := p.fd.Value()
    if err != nil {
        return 0, nil, 0, err
    }

    attr := bpfProgTestRunAttr{
        fd:          fd,
        dataSizeIn:  uint32(len(in)),
        dataSizeOut: uint32(len(out)),
        dataIn:      internal.NewSlicePointer(in),
        dataOut:     internal.NewSlicePointer(out),
        repeat:      uint32(repeat),
    }

    for {
        err = bpfProgTestRun(&attr)
        if err == nil {
            break
        }

        if errors.Is(err, unix.EINTR) {
            if reset != nil {
                reset()
            }
            continue
        }

        return 0, nil, 0, fmt.Errorf("can't run test: %w", err)
    }

    if int(attr.dataSizeOut) > cap(out) {
        // Houston, we have a problem. The program created more data than we allocated,
        // and the kernel wrote past the end of our buffer.
        panic("kernel wrote past end of output buffer")
    }
    out = out[:int(attr.dataSizeOut)]

    total := time.Duration(attr.duration) * time.Nanosecond
    return attr.retval, out, total, nil
}

func unmarshalProgram(buf []byte) (*Program, error) {
    if len(buf) != 4 {
        return nil, errors.New("program id requires 4 byte value")
    }

    // Looking up an entry in a nested map or prog array returns an id,
    // not an fd.
    id := internal.NativeEndian.Uint32(buf)
    return NewProgramFromID(ProgramID(id))
}

func marshalProgram(p *Program, length int) ([]byte, error) {
    if length != 4 {
        return nil, fmt.Errorf("can't marshal program to %d bytes", length)
    }

    value, err := p.fd.Value()
    if err != nil {
        return nil, err
    }

    buf := make([]byte, 4)
    internal.NativeEndian.PutUint32(buf, value)
    return buf, nil
}

// Attach a Program.
//
// Deprecated: use link.RawAttachProgram instead.
func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
    if fd < 0 {
        return errors.New("invalid fd")
    }

    pfd, err := p.fd.Value()
    if err != nil {
        return err
    }

    attr := internal.BPFProgAttachAttr{
        TargetFd:    uint32(fd),
        AttachBpfFd: pfd,
        AttachType:  uint32(typ),
        AttachFlags: uint32(flags),
    }

    return internal.BPFProgAttach(&attr)
}

// Detach a Program.
//
// Deprecated: use link.RawDetachProgram instead.
func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error {
    if fd < 0 {
        return errors.New("invalid fd")
    }

    if flags != 0 {
        return errors.New("flags must be zero")
    }

    pfd, err := p.fd.Value()
    if err != nil {
        return err
    }

    attr := internal.BPFProgDetachAttr{
        TargetFd:    uint32(fd),
        AttachBpfFd: pfd,
        AttachType:  uint32(typ),
    }

    return internal.BPFProgDetach(&attr)
}

// LoadPinnedProgram loads a Program from a BPF file.
//
// Requires at least Linux 4.11.
func LoadPinnedProgram(fileName string) (*Program, error) {
    fd, err := internal.BPFObjGet(fileName)
    if err != nil {
        return nil, err
    }

    info, err := newProgramInfoFromFd(fd)
    if err != nil {
        _ = fd.Close()
        return nil, fmt.Errorf("info for %s: %w", fileName, err)
    }

    return &Program{"", fd, filepath.Base(fileName), "", info.Type}, nil
}

// SanitizeName replaces all invalid characters in name with replacement.
// Passing a negative value for replacement will delete characters instead
// of replacing them. Use this to automatically generate valid names for maps
// and programs at runtime.
//
// The set of allowed characters depends on the running kernel version.
// Dots are only allowed as of kernel 5.2.
func SanitizeName(name string, replacement rune) string {
    return strings.Map(func(char rune) rune {
        if invalidBPFObjNameChar(char) {
            return replacement
        }
        return char
    }, name)
}

// ProgramGetNextID returns the ID of the next eBPF program.
//
// Returns ErrNotExist, if there is no next eBPF program.
func ProgramGetNextID(startID ProgramID) (ProgramID, error) {
    id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID))
    return ProgramID(id), err
}

// ID returns the systemwide unique ID of the program.
//
// Deprecated: use ProgramInfo.ID() instead.
func (p *Program) ID() (ProgramID, error) {
    info, err := bpfGetProgInfoByFD(p.fd)
    if err != nil {
        return ProgramID(0), err
    }
    return ProgramID(info.id), nil
}

func findKernelType(name string, typ btf.Type) error {
    kernel, err := btf.LoadKernelSpec()
    if err != nil {
        return fmt.Errorf("can't load kernel spec: %w", err)
    }

    return kernel.FindType(name, typ)
}

func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
    type match struct {
        p ProgramType
        a AttachType
    }

    target := match{progType, attachType}
    switch target {
    case match{LSM, AttachLSMMac}:
        var target btf.Func
        err := findKernelType("bpf_lsm_"+name, &target)
        if errors.Is(err, btf.ErrNotFound) {
            return nil, &internal.UnsupportedFeatureError{
                Name: name + " LSM hook",
            }
        }
        if err != nil {
            return nil, fmt.Errorf("resolve BTF for LSM hook %s: %w", name, err)
        }

        return &target, nil

    case match{Tracing, AttachTraceIter}:
        var target btf.Func
        err := findKernelType("bpf_iter_"+name, &target)
        if errors.Is(err, btf.ErrNotFound) {
            return nil, &internal.UnsupportedFeatureError{
                Name: name + " iterator",
            }
        }
        if err != nil {
            return nil, fmt.Errorf("resolve BTF for iterator %s: %w", name, err)
        }

        return &target, nil

    default:
        return nil, nil
    }
}
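A small, self-contained sketch of the public surface above, mirroring the BPF_PROG_TEST_RUN feature probe: load a trivial SocketFilter program and run it once via Test. It assumes Linux 4.12+ and sufficient privileges to load BPF programs.

package main

import (
    "fmt"
    "log"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/asm"
)

func main() {
    prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
        Type: ebpf.SocketFilter,
        Instructions: asm.Instructions{
            asm.LoadImm(asm.R0, 0, asm.DWord), // program return value: 0
            asm.Return(),
        },
        License: "MIT",
    })
    if err != nil {
        log.Fatalf("load program: %v", err)
    }
    defer prog.Close()

    // The kernel wants at least 14 bytes (an Ethernet header) of input.
    ret, _, err := prog.Test(make([]byte, 14))
    if err != nil {
        log.Fatalf("test run: %v", err)
    }
    fmt.Println("program returned", ret)
}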
@@ -0,0 +1,91 @@
#!/bin/bash
# Test the current package under a different kernel.
# Requires virtme and qemu to be installed.

set -eu
set -o pipefail

if [[ "${1:-}" = "--in-vm" ]]; then
  shift

  mount -t bpf bpf /sys/fs/bpf
  export CGO_ENABLED=0
  export GOFLAGS=-mod=readonly
  export GOPATH=/run/go-path
  export GOPROXY=file:///run/go-path/pkg/mod/cache/download
  export GOSUMDB=off
  export GOCACHE=/run/go-cache

  if [[ -d "/run/input/bpf" ]]; then
    export KERNEL_SELFTESTS="/run/input/bpf"
  fi

  readonly output="${1}"
  shift

  echo Running tests...
  go test -v -coverpkg=./... -coverprofile="$output/coverage.txt" -count 1 ./...
  touch "$output/success"
  exit 0
fi

# Pull all dependencies, so that we can run tests without the
# vm having network access.
go mod download

# Use sudo if /dev/kvm isn't accessible by the current user.
sudo=""
if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then
  sudo="sudo"
fi
readonly sudo

readonly kernel_version="${1:-}"
if [[ -z "${kernel_version}" ]]; then
  echo "Expecting kernel version as first argument"
  exit 1
fi

readonly kernel="linux-${kernel_version}.bz"
readonly selftests="linux-${kernel_version}-selftests-bpf.bz"
readonly input="$(mktemp -d)"
readonly output="$(mktemp -d)"
readonly tmp_dir="${TMPDIR:-/tmp}"
readonly branch="${BRANCH:-master}"

fetch() {
  echo Fetching "${1}"
  wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}"
}

fetch "${kernel}"

if fetch "${selftests}"; then
  mkdir "${input}/bpf"
  tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf"
else
  echo "No selftests found, disabling"
fi

echo Testing on "${kernel_version}"
$sudo virtme-run --kimg "${tmp_dir}/${kernel}" --memory 512M --pwd \
  --rw \
  --rwdir=/run/input="${input}" \
  --rwdir=/run/output="${output}" \
  --rodir=/run/go-path="$(go env GOPATH)" \
  --rwdir=/run/go-cache="$(go env GOCACHE)" \
  --script-sh "PATH=\"$PATH\" $(realpath "$0") --in-vm /run/output" \
  --qemu-opts -smp 2 # need at least two CPUs for some tests

if [[ ! -e "${output}/success" ]]; then
  echo "Test failed on ${kernel_version}"
  exit 1
else
  echo "Test successful on ${kernel_version}"
  if [[ -v COVERALLS_TOKEN ]]; then
    goveralls -coverprofile="${output}/coverage.txt" -service=semaphore -repotoken "$COVERALLS_TOKEN"
  fi
fi

$sudo rm -r "${input}"
$sudo rm -r "${output}"
@@ -0,0 +1,491 @@
package ebpf

import (
    "errors"
    "fmt"
    "unsafe"

    "github.com/cilium/ebpf/internal"
    "github.com/cilium/ebpf/internal/btf"
    "github.com/cilium/ebpf/internal/unix"
)

// Generic errors returned by BPF syscalls.
var ErrNotExist = errors.New("requested object does not exist")

// bpfObjName is a null-terminated string made up of
// 'A-Za-z0-9_' characters.
type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte

// newBPFObjName truncates the result if it is too long.
func newBPFObjName(name string) bpfObjName {
    var result bpfObjName
    copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
    return result
}

// invalidBPFObjNameChar returns true if char may not appear in
// a BPF object name.
func invalidBPFObjNameChar(char rune) bool {
    dotAllowed := objNameAllowsDot() == nil

    switch {
    case char >= 'A' && char <= 'Z':
        return false
    case char >= 'a' && char <= 'z':
        return false
    case char >= '0' && char <= '9':
        return false
    case dotAllowed && char == '.':
        return false
    case char == '_':
        return false
    default:
        return true
    }
}

type bpfMapCreateAttr struct {
    mapType        MapType
    keySize        uint32
    valueSize      uint32
    maxEntries     uint32
    flags          uint32
    innerMapFd     uint32     // since 4.12 56f668dfe00d
    numaNode       uint32     // since 4.14 96eabe7a40aa
    mapName        bpfObjName // since 4.15 ad5b177bd73f
    mapIfIndex     uint32
    btfFd          uint32
    btfKeyTypeID   btf.TypeID
    btfValueTypeID btf.TypeID
}

type bpfMapOpAttr struct {
    mapFd   uint32
    padding uint32
    key     internal.Pointer
    value   internal.Pointer
    flags   uint64
}

type bpfBatchMapOpAttr struct {
    inBatch   internal.Pointer
    outBatch  internal.Pointer
    keys      internal.Pointer
    values    internal.Pointer
    count     uint32
    mapFd     uint32
    elemFlags uint64
    flags     uint64
}

type bpfMapInfo struct {
    map_type                  uint32 // since 4.12 1e2709769086
    id                        uint32
    key_size                  uint32
    value_size                uint32
    max_entries               uint32
    map_flags                 uint32
    name                      bpfObjName // since 4.15 ad5b177bd73f
    ifindex                   uint32     // since 4.16 52775b33bb50
    btf_vmlinux_value_type_id uint32     // since 5.6 85d33df357b6
    netns_dev                 uint64     // since 4.16 52775b33bb50
    netns_ino                 uint64
    btf_id                    uint32 // since 4.18 78958fca7ead
    btf_key_type_id           uint32 // since 4.18 9b2cf328b2ec
    btf_value_type_id         uint32
}

type bpfProgLoadAttr struct {
    progType           ProgramType
    insCount           uint32
    instructions       internal.Pointer
    license            internal.Pointer
    logLevel           uint32
    logSize            uint32
    logBuf             internal.Pointer
    kernelVersion      uint32     // since 4.1 2541517c32be
    progFlags          uint32     // since 4.11 e07b98d9bffe
    progName           bpfObjName // since 4.15 067cae47771c
    progIfIndex        uint32     // since 4.15 1f6f4cb7ba21
    expectedAttachType AttachType // since 4.17 5e43f899b03a
    progBTFFd          uint32
    funcInfoRecSize    uint32
    funcInfo           internal.Pointer
    funcInfoCnt        uint32
    lineInfoRecSize    uint32
    lineInfo           internal.Pointer
    lineInfoCnt        uint32
    attachBTFID        btf.TypeID
    attachProgFd       uint32
}

type bpfProgInfo struct {
    prog_type                uint32
    id                       uint32
    tag                      [unix.BPF_TAG_SIZE]byte
    jited_prog_len           uint32
    xlated_prog_len          uint32
    jited_prog_insns         internal.Pointer
    xlated_prog_insns        internal.Pointer
    load_time                uint64 // since 4.15 cb4d2b3f03d8
    created_by_uid           uint32
    nr_map_ids               uint32
    map_ids                  internal.Pointer
    name                     bpfObjName // since 4.15 067cae47771c
    ifindex                  uint32
    gpl_compatible           uint32
    netns_dev                uint64
    netns_ino                uint64
    nr_jited_ksyms           uint32
    nr_jited_func_lens       uint32
    jited_ksyms              internal.Pointer
    jited_func_lens          internal.Pointer
    btf_id                   uint32
    func_info_rec_size       uint32
    func_info                internal.Pointer
    nr_func_info             uint32
    nr_line_info             uint32
    line_info                internal.Pointer
    jited_line_info          internal.Pointer
    nr_jited_line_info       uint32
    line_info_rec_size       uint32
    jited_line_info_rec_size uint32
    nr_prog_tags             uint32
    prog_tags                internal.Pointer
    run_time_ns              uint64
    run_cnt                  uint64
}

type bpfProgTestRunAttr struct {
    fd          uint32
    retval      uint32
    dataSizeIn  uint32
    dataSizeOut uint32
    dataIn      internal.Pointer
    dataOut     internal.Pointer
    repeat      uint32
    duration    uint32
}

type bpfGetFDByIDAttr struct {
    id   uint32
    next uint32
}

type bpfMapFreezeAttr struct {
    mapFd uint32
}

type bpfObjGetNextIDAttr struct {
    startID   uint32
    nextID    uint32
    openFlags uint32
}

func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) {
    for {
        fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
        // As of ~4.20 the verifier can be interrupted by a signal,
        // and returns EAGAIN in that case.
        if err == unix.EAGAIN {
            continue
        }

        if err != nil {
            return nil, err
        }

        return internal.NewFD(uint32(fd)), nil
    }
}

func bpfProgTestRun(attr *bpfProgTestRunAttr) error {
    _, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
    return err
}

func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
    fd, err := internal.BPF(internal.BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
    if err != nil {
        return nil, err
    }

    return internal.NewFD(uint32(fd)), nil
}

var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
    _, err := bpfMapCreate(&bpfMapCreateAttr{
        mapType:    ArrayOfMaps,
        keySize:    4,
        valueSize:  4,
        maxEntries: 1,
        // Invalid file descriptor.
        innerMapFd: ^uint32(0),
    })
    if errors.Is(err, unix.EINVAL) {
        return internal.ErrNotSupported
    }
    if errors.Is(err, unix.EBADF) {
        return nil
    }
    return err
})

var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error {
    // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
    // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
    m, err := bpfMapCreate(&bpfMapCreateAttr{
        mapType:    Array,
        keySize:    4,
        valueSize:  4,
        maxEntries: 1,
        flags:      unix.BPF_F_RDONLY_PROG,
    })
    if err != nil {
        return internal.ErrNotSupported
    }
    _ = m.Close()
    return nil
})

func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
    fd, err := m.Value()
    if err != nil {
        return err
    }

    attr := bpfMapOpAttr{
        mapFd: fd,
        key:   key,
        value: valueOut,
    }
    _, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return wrapMapError(err)
}

func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error {
    fd, err := m.Value()
    if err != nil {
        return err
    }

    attr := bpfMapOpAttr{
        mapFd: fd,
        key:   key,
        value: valueOut,
    }
    _, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return wrapMapError(err)
}

func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error {
    fd, err := m.Value()
    if err != nil {
        return err
    }

    attr := bpfMapOpAttr{
        mapFd: fd,
        key:   key,
        value: valueOut,
        flags: flags,
    }
    _, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return wrapMapError(err)
}

func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error {
    fd, err := m.Value()
    if err != nil {
        return err
    }

    attr := bpfMapOpAttr{
        mapFd: fd,
        key:   key,
    }
    _, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return wrapMapError(err)
}

func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error {
    fd, err := m.Value()
    if err != nil {
        return err
    }

    attr := bpfMapOpAttr{
        mapFd: fd,
        key:   key,
        value: nextKeyOut,
    }
    _, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return wrapMapError(err)
}

func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) {
    attr := bpfObjGetNextIDAttr{
        startID: start,
    }
    _, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return attr.nextID, wrapObjError(err)
}

func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) {
    fd, err := m.Value()
    if err != nil {
        return 0, err
    }

    attr := bpfBatchMapOpAttr{
        inBatch:  inBatch,
        outBatch: outBatch,
        keys:     keys,
        values:   values,
        count:    count,
        mapFd:    fd,
    }
    if opts != nil {
        attr.elemFlags = opts.ElemFlags
        attr.flags = opts.Flags
    }
    _, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    // always return count even on an error, as things like update might partially be fulfilled.
    return attr.count, wrapMapError(err)
}

func wrapObjError(err error) error {
    if err == nil {
        return nil
    }
    if errors.Is(err, unix.ENOENT) {
        return fmt.Errorf("%w", ErrNotExist)
    }

    return errors.New(err.Error())
}

func wrapMapError(err error) error {
    if err == nil {
        return nil
    }

    if errors.Is(err, unix.ENOENT) {
        return ErrKeyNotExist
    }

    if errors.Is(err, unix.EEXIST) {
        return ErrKeyExist
    }

    if errors.Is(err, unix.ENOTSUPP) {
        return ErrNotSupported
    }

    return errors.New(err.Error())
}

func bpfMapFreeze(m *internal.FD) error {
    fd, err := m.Value()
    if err != nil {
        return err
    }

    attr := bpfMapFreezeAttr{
        mapFd: fd,
    }
    _, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return err
}

func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) {
    var info bpfProgInfo
    if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
        return nil, fmt.Errorf("can't get program info: %w", err)
    }
    return &info, nil
}

func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
    var info bpfMapInfo
    err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
    if err != nil {
        return nil, fmt.Errorf("can't get map info: %w", err)
    }
    return &info, nil
}

var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
    attr := bpfMapCreateAttr{
        mapType:    Array,
        keySize:    4,
        valueSize:  4,
        maxEntries: 1,
        mapName:    newBPFObjName("feature_test"),
    }

    fd, err := bpfMapCreate(&attr)
    if err != nil {
        return internal.ErrNotSupported
    }

    _ = fd.Close()
    return nil
})

var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() error {
    if err := haveObjName(); err != nil {
        return err
    }

    attr := bpfMapCreateAttr{
        mapType:    Array,
        keySize:    4,
        valueSize:  4,
        maxEntries: 1,
        mapName:    newBPFObjName(".test"),
    }

    fd, err := bpfMapCreate(&attr)
    if err != nil {
        return internal.ErrNotSupported
    }

    _ = fd.Close()
    return nil
})

var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
    var maxEntries uint32 = 2
    attr := bpfMapCreateAttr{
        mapType:    Hash,
        keySize:    4,
        valueSize:  4,
        maxEntries: maxEntries,
    }

    fd, err := bpfMapCreate(&attr)
    if err != nil {
        return internal.ErrNotSupported
    }
    defer fd.Close()
    keys := []uint32{1, 2}
    values := []uint32{3, 4}
    kp, _ := marshalPtr(keys, 8)
    vp, _ := marshalPtr(values, 8)
    nilPtr := internal.NewPointer(nil)
    _, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil)
    if err != nil {
        return internal.ErrNotSupported
    }
    return nil
})

func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) {
    attr := bpfGetFDByIDAttr{
        id: id,
    }
    ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
    return internal.NewFD(uint32(ptr)), wrapObjError(err)
}
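Because wrapMapError above normalizes ENOENT and EEXIST into the package-level ErrKeyNotExist and ErrKeyExist errors, callers can branch on errors.Is instead of raw errnos. A caller-side sketch, where the map variable, key and value types are assumptions for illustration:

// Sketch only: "counters" is a hypothetical *ebpf.Map with uint32 keys
// and uint64 values.
var value uint64
if err := counters.Lookup(uint32(1), &value); errors.Is(err, ebpf.ErrKeyNotExist) {
    // Key not present yet, so seed it.
    _ = counters.Put(uint32(1), uint64(0))
}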
@@ -0,0 +1,213 @@
package ebpf

//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType

// MapType indicates the type of map structure
// that will be initialized in the kernel.
type MapType uint32

// All the various map types that can be created
const (
    UnspecifiedMap MapType = iota
    // Hash is a hash map
    Hash
    // Array is an array map
    Array
    // ProgramArray - A program array map is a special kind of array map whose map
    // values contain only file descriptors referring to other eBPF
    // programs. Thus, both the key_size and value_size must be
    // exactly four bytes. This map is used in conjunction with the
    // TailCall helper.
    ProgramArray
    // PerfEventArray - A perf event array is used in conjunction with PerfEventRead
    // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers.
    PerfEventArray
    // PerCPUHash - This data structure is useful for people who have high performance
    // network needs and can reconcile adds at the end of some cycle, so that
    // hashes can be lock free without the use of XAdd, which can be costly.
    PerCPUHash
    // PerCPUArray - This data structure is useful for people who have high performance
    // network needs and can reconcile adds at the end of some cycle, so that
    // hashes can be lock free without the use of XAdd, which can be costly.
    // Each CPU gets a copy of this hash, the contents of all of which can be reconciled
    // later.
    PerCPUArray
    // StackTrace - This holds whole user and kernel stack traces, it can be retrieved with
    // GetStackID
    StackTrace
    // CGroupArray - This is a very niche structure used to help SKBInCGroup determine
    // if an skb is from a socket belonging to a specific cgroup
    CGroupArray
    // LRUHash - This allows you to create a small hash structure that will purge the
    // least recently used items rather than throw an error when you run out of memory
    LRUHash
    // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs,
    // it has more to do with including the CPU id with the LRU calculation so that if a
    // particular CPU is using a value over-and-over again, then it will be saved, but if
    // a value is being retrieved a lot but sparsely across CPUs it is not as important, basically
    // giving weight to CPU locality over overall usage.
    LRUCPUHash
    // LPMTrie - This is an implementation of Longest-Prefix-Match Trie structure. It is useful,
    // for storing things like IP addresses which can be bit masked allowing for keys of differing
    // values to refer to the same reference based on their masks. See wikipedia for more details.
    LPMTrie
    // ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps
    // itself.
    ArrayOfMaps
    // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps
    // itself.
    HashOfMaps
    // DevMap - Specialized map to store references to network devices.
    DevMap
    // SockMap - Specialized map to store references to sockets.
    SockMap
    // CPUMap - Specialized map to store references to CPUs.
    CPUMap
    // XSKMap - Specialized map for XDP programs to store references to open sockets.
    XSKMap
    // SockHash - Specialized hash to store references to sockets.
    SockHash
    // CGroupStorage - Special map for CGroups.
    CGroupStorage
    // ReusePortSockArray - Specialized map to store references to sockets that can be reused.
    ReusePortSockArray
    // PerCPUCGroupStorage - Special per CPU map for CGroups.
    PerCPUCGroupStorage
    // Queue - FIFO storage for BPF programs.
    Queue
    // Stack - LIFO storage for BPF programs.
    Stack
    // SkStorage - Specialized map for local storage at SK for BPF programs.
    SkStorage
    // DevMapHash - Hash-based indexing scheme for references to network devices.
    DevMapHash
)

// hasPerCPUValue returns true if the Map stores a value per CPU.
func (mt MapType) hasPerCPUValue() bool {
    return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash
}

// canStoreMap returns true if the map type accepts a map fd
// for update and returns a map id for lookup.
func (mt MapType) canStoreMap() bool {
    return mt == ArrayOfMaps || mt == HashOfMaps
}

// canStoreProgram returns true if the map type accepts a program fd
// for update and returns a program id for lookup.
func (mt MapType) canStoreProgram() bool {
    return mt == ProgramArray
}

// ProgramType of the eBPF program
type ProgramType uint32

// eBPF program types
const (
    UnspecifiedProgram ProgramType = iota
    SocketFilter
    Kprobe
    SchedCLS
    SchedACT
    TracePoint
    XDP
    PerfEvent
    CGroupSKB
    CGroupSock
    LWTIn
    LWTOut
    LWTXmit
    SockOps
    SkSKB
    CGroupDevice
    SkMsg
    RawTracepoint
    CGroupSockAddr
    LWTSeg6Local
    LircMode2
    SkReuseport
    FlowDissector
    CGroupSysctl
    RawTracepointWritable
    CGroupSockopt
    Tracing
    StructOps
    Extension
    LSM
    SkLookup
)

// AttachType of the eBPF program, needed to differentiate allowed context accesses in
// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required.
// Will cause invalid argument (EINVAL) at program load time if set incorrectly.
type AttachType uint32

// AttachNone is an alias for AttachCGroupInetIngress for readability reasons.
const AttachNone AttachType = 0

const (
    AttachCGroupInetIngress AttachType = iota
    AttachCGroupInetEgress
    AttachCGroupInetSockCreate
    AttachCGroupSockOps
    AttachSkSKBStreamParser
    AttachSkSKBStreamVerdict
    AttachCGroupDevice
    AttachSkMsgVerdict
    AttachCGroupInet4Bind
    AttachCGroupInet6Bind
    AttachCGroupInet4Connect
    AttachCGroupInet6Connect
    AttachCGroupInet4PostBind
    AttachCGroupInet6PostBind
    AttachCGroupUDP4Sendmsg
    AttachCGroupUDP6Sendmsg
    AttachLircMode2
    AttachFlowDissector
    AttachCGroupSysctl
    AttachCGroupUDP4Recvmsg
    AttachCGroupUDP6Recvmsg
    AttachCGroupGetsockopt
|
||||||
|
AttachCGroupSetsockopt
|
||||||
|
AttachTraceRawTp
|
||||||
|
AttachTraceFEntry
|
||||||
|
AttachTraceFExit
|
||||||
|
AttachModifyReturn
|
||||||
|
AttachLSMMac
|
||||||
|
AttachTraceIter
|
||||||
|
AttachCgroupInet4GetPeername
|
||||||
|
AttachCgroupInet6GetPeername
|
||||||
|
AttachCgroupInet4GetSockname
|
||||||
|
AttachCgroupInet6GetSockname
|
||||||
|
AttachXDPDevMap
|
||||||
|
AttachCgroupInetSockRelease
|
||||||
|
AttachXDPCPUMap
|
||||||
|
AttachSkLookup
|
||||||
|
AttachXDP
|
||||||
|
)
|
||||||
|
|
||||||
|
// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
|
||||||
|
type AttachFlags uint32
|
||||||
|
|
||||||
|
// PinType determines whether a map is pinned into a BPFFS.
|
||||||
|
type PinType int
|
||||||
|
|
||||||
|
// Valid pin types.
|
||||||
|
//
|
||||||
|
// Mirrors enum libbpf_pin_type.
|
||||||
|
const (
|
||||||
|
PinNone PinType = iota
|
||||||
|
// Pin an object by using its name as the filename.
|
||||||
|
PinByName
|
||||||
|
)
|
||||||
|
|
||||||
|
// BatchOptions batch map operations options
|
||||||
|
//
|
||||||
|
// Mirrors libbpf struct bpf_map_batch_opts
|
||||||
|
// Currently BPF_F_FLAG is the only supported
|
||||||
|
// flag (for ElemFlags).
|
||||||
|
type BatchOptions struct {
|
||||||
|
ElemFlags uint64
|
||||||
|
Flags uint64
|
||||||
|
}
|
|
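The MapType constants above are consumed through a map specification when a map is created. A minimal sketch of that flow is shown below; it assumes the cilium/ebpf `MapSpec`/`NewMap` API (which this vendored file appears to belong to) and is illustration only, not part of the diff:

```go
package main

import "github.com/cilium/ebpf"

func main() {
	// Hash selects one of the MapType constants documented above.
	spec := &ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,   // 32-bit keys
		ValueSize:  8,   // 64-bit values
		MaxEntries: 128, // capacity of the hash map
	}
	m, err := ebpf.NewMap(spec)
	if err != nil {
		panic(err) // creating maps typically requires root or CAP_BPF on Linux
	}
	defer m.Close()
}
```

Per the comments above, choosing PerCPUHash or PerCPUArray instead would make `hasPerCPUValue` true, meaning one value is stored per CPU rather than a single shared value.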
@ -0,0 +1,168 @@
|
||||||
|
// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package ebpf
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[UnspecifiedMap-0]
|
||||||
|
_ = x[Hash-1]
|
||||||
|
_ = x[Array-2]
|
||||||
|
_ = x[ProgramArray-3]
|
||||||
|
_ = x[PerfEventArray-4]
|
||||||
|
_ = x[PerCPUHash-5]
|
||||||
|
_ = x[PerCPUArray-6]
|
||||||
|
_ = x[StackTrace-7]
|
||||||
|
_ = x[CGroupArray-8]
|
||||||
|
_ = x[LRUHash-9]
|
||||||
|
_ = x[LRUCPUHash-10]
|
||||||
|
_ = x[LPMTrie-11]
|
||||||
|
_ = x[ArrayOfMaps-12]
|
||||||
|
_ = x[HashOfMaps-13]
|
||||||
|
_ = x[DevMap-14]
|
||||||
|
_ = x[SockMap-15]
|
||||||
|
_ = x[CPUMap-16]
|
||||||
|
_ = x[XSKMap-17]
|
||||||
|
_ = x[SockHash-18]
|
||||||
|
_ = x[CGroupStorage-19]
|
||||||
|
_ = x[ReusePortSockArray-20]
|
||||||
|
_ = x[PerCPUCGroupStorage-21]
|
||||||
|
_ = x[Queue-22]
|
||||||
|
_ = x[Stack-23]
|
||||||
|
_ = x[SkStorage-24]
|
||||||
|
_ = x[DevMapHash-25]
|
||||||
|
}
|
||||||
|
|
||||||
|
const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHash"
|
||||||
|
|
||||||
|
var _MapType_index = [...]uint8{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248}
|
||||||
|
|
||||||
|
func (i MapType) String() string {
|
||||||
|
if i >= MapType(len(_MapType_index)-1) {
|
||||||
|
return "MapType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
return _MapType_name[_MapType_index[i]:_MapType_index[i+1]]
|
||||||
|
}
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[UnspecifiedProgram-0]
|
||||||
|
_ = x[SocketFilter-1]
|
||||||
|
_ = x[Kprobe-2]
|
||||||
|
_ = x[SchedCLS-3]
|
||||||
|
_ = x[SchedACT-4]
|
||||||
|
_ = x[TracePoint-5]
|
||||||
|
_ = x[XDP-6]
|
||||||
|
_ = x[PerfEvent-7]
|
||||||
|
_ = x[CGroupSKB-8]
|
||||||
|
_ = x[CGroupSock-9]
|
||||||
|
_ = x[LWTIn-10]
|
||||||
|
_ = x[LWTOut-11]
|
||||||
|
_ = x[LWTXmit-12]
|
||||||
|
_ = x[SockOps-13]
|
||||||
|
_ = x[SkSKB-14]
|
||||||
|
_ = x[CGroupDevice-15]
|
||||||
|
_ = x[SkMsg-16]
|
||||||
|
_ = x[RawTracepoint-17]
|
||||||
|
_ = x[CGroupSockAddr-18]
|
||||||
|
_ = x[LWTSeg6Local-19]
|
||||||
|
_ = x[LircMode2-20]
|
||||||
|
_ = x[SkReuseport-21]
|
||||||
|
_ = x[FlowDissector-22]
|
||||||
|
_ = x[CGroupSysctl-23]
|
||||||
|
_ = x[RawTracepointWritable-24]
|
||||||
|
_ = x[CGroupSockopt-25]
|
||||||
|
_ = x[Tracing-26]
|
||||||
|
_ = x[StructOps-27]
|
||||||
|
_ = x[Extension-28]
|
||||||
|
_ = x[LSM-29]
|
||||||
|
_ = x[SkLookup-30]
|
||||||
|
}
|
||||||
|
|
||||||
|
const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookup"
|
||||||
|
|
||||||
|
var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294}
|
||||||
|
|
||||||
|
func (i ProgramType) String() string {
|
||||||
|
if i >= ProgramType(len(_ProgramType_index)-1) {
|
||||||
|
return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]]
|
||||||
|
}
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[AttachNone-0]
|
||||||
|
_ = x[AttachCGroupInetIngress-0]
|
||||||
|
_ = x[AttachCGroupInetEgress-1]
|
||||||
|
_ = x[AttachCGroupInetSockCreate-2]
|
||||||
|
_ = x[AttachCGroupSockOps-3]
|
||||||
|
_ = x[AttachSkSKBStreamParser-4]
|
||||||
|
_ = x[AttachSkSKBStreamVerdict-5]
|
||||||
|
_ = x[AttachCGroupDevice-6]
|
||||||
|
_ = x[AttachSkMsgVerdict-7]
|
||||||
|
_ = x[AttachCGroupInet4Bind-8]
|
||||||
|
_ = x[AttachCGroupInet6Bind-9]
|
||||||
|
_ = x[AttachCGroupInet4Connect-10]
|
||||||
|
_ = x[AttachCGroupInet6Connect-11]
|
||||||
|
_ = x[AttachCGroupInet4PostBind-12]
|
||||||
|
_ = x[AttachCGroupInet6PostBind-13]
|
||||||
|
_ = x[AttachCGroupUDP4Sendmsg-14]
|
||||||
|
_ = x[AttachCGroupUDP6Sendmsg-15]
|
||||||
|
_ = x[AttachLircMode2-16]
|
||||||
|
_ = x[AttachFlowDissector-17]
|
||||||
|
_ = x[AttachCGroupSysctl-18]
|
||||||
|
_ = x[AttachCGroupUDP4Recvmsg-19]
|
||||||
|
_ = x[AttachCGroupUDP6Recvmsg-20]
|
||||||
|
_ = x[AttachCGroupGetsockopt-21]
|
||||||
|
_ = x[AttachCGroupSetsockopt-22]
|
||||||
|
_ = x[AttachTraceRawTp-23]
|
||||||
|
_ = x[AttachTraceFEntry-24]
|
||||||
|
_ = x[AttachTraceFExit-25]
|
||||||
|
_ = x[AttachModifyReturn-26]
|
||||||
|
_ = x[AttachLSMMac-27]
|
||||||
|
_ = x[AttachTraceIter-28]
|
||||||
|
_ = x[AttachCgroupInet4GetPeername-29]
|
||||||
|
_ = x[AttachCgroupInet6GetPeername-30]
|
||||||
|
_ = x[AttachCgroupInet4GetSockname-31]
|
||||||
|
_ = x[AttachCgroupInet6GetSockname-32]
|
||||||
|
_ = x[AttachXDPDevMap-33]
|
||||||
|
_ = x[AttachCgroupInetSockRelease-34]
|
||||||
|
_ = x[AttachXDPCPUMap-35]
|
||||||
|
_ = x[AttachSkLookup-36]
|
||||||
|
_ = x[AttachXDP-37]
|
||||||
|
}
|
||||||
|
|
||||||
|
const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIterAttachCgroupInet4GetPeernameAttachCgroupInet6GetPeernameAttachCgroupInet4GetSocknameAttachCgroupInet6GetSocknameAttachXDPDevMapAttachCgroupInetSockReleaseAttachXDPCPUMapAttachSkLookupAttachXDP"
|
||||||
|
|
||||||
|
var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582, 610, 638, 666, 694, 709, 736, 751, 765, 774}
|
||||||
|
|
||||||
|
func (i AttachType) String() string {
|
||||||
|
if i >= AttachType(len(_AttachType_index)-1) {
|
||||||
|
return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
|
||||||
|
}
|
||||||
|
func _() {
|
||||||
|
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||||
|
// Re-run the stringer command to generate them again.
|
||||||
|
var x [1]struct{}
|
||||||
|
_ = x[PinNone-0]
|
||||||
|
_ = x[PinByName-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
const _PinType_name = "PinNonePinByName"
|
||||||
|
|
||||||
|
var _PinType_index = [...]uint8{0, 7, 16}
|
||||||
|
|
||||||
|
func (i PinType) String() string {
|
||||||
|
if i < 0 || i >= PinType(len(_PinType_index)-1) {
|
||||||
|
return "PinType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||||
|
}
|
||||||
|
return _PinType_name[_PinType_index[i]:_PinType_index[i+1]]
|
||||||
|
}
|
|
@@ -0,0 +1,2 @@
example/example
cmd/cgctl/cgctl
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@@ -0,0 +1,24 @@
# Copyright The containerd Authors.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

PACKAGES=$(shell go list ./... | grep -v /vendor/)

all: cgutil
	go build -v

cgutil:
	cd cmd/cgctl && go build -v

proto:
	protobuild --quiet ${PACKAGES}
@@ -0,0 +1,46 @@
version = "unstable"
generator = "gogoctrd"
plugins = ["grpc"]

# Control protoc include paths. Below are usually some good defaults, but feel
# free to try it without them if it works for your project.
[includes]
  # Include paths that will be added before all others. Typically, you want to
  # treat the root of the project as an include, but this may not be necessary.
  # before = ["."]

  # Paths that should be treated as include roots in relation to the vendor
  # directory. These will be calculated with the vendor directory nearest the
  # target package.
  # vendored = ["github.com/gogo/protobuf"]
  packages = ["github.com/gogo/protobuf"]

  # Paths that will be added untouched to the end of the includes. We use
  # `/usr/local/include` to pick up the common install location of protobuf.
  # This is the default.
  after = ["/usr/local/include", "/usr/include"]

# This section maps protobuf imports to Go packages. These will become
# `-M` directives in the call to the go protobuf generator.
[packages]
  "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
  "google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
  "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
  "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
  "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"

# Aggregate the API descriptors to lock down API changes.
[[descriptors]]
prefix = "github.com/containerd/cgroups/stats/v1"
target = "stats/v1/metrics.pb.txt"
ignore_files = [
	"google/protobuf/descriptor.proto",
	"gogoproto/gogo.proto"
]
[[descriptors]]
prefix = "github.com/containerd/cgroups/v2/stats"
target = "v2/stats/metrics.pb.txt"
ignore_files = [
	"google/protobuf/descriptor.proto",
	"gogoproto/gogo.proto"
]
@@ -0,0 +1,149 @@
# cgroups

[![Build Status](https://github.com/containerd/cgroups/workflows/CI/badge.svg)](https://github.com/containerd/cgroups/actions?query=workflow%3ACI)
[![codecov](https://codecov.io/gh/containerd/cgroups/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups)
[![GoDoc](https://godoc.org/github.com/containerd/cgroups?status.svg)](https://godoc.org/github.com/containerd/cgroups)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/cgroups)](https://goreportcard.com/report/github.com/containerd/cgroups)

Go package for creating, managing, inspecting, and destroying cgroups.
The resources format for settings on the cgroup uses the OCI runtime-spec found
[here](https://github.com/opencontainers/runtime-spec).
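The snippets below are fragments; they assume the package and the OCI runtime-spec types are already imported. A minimal sketch of those imports (the `specs` alias is an assumption for readability, mirroring how the vendored sources import it):

```go
import (
	"github.com/containerd/cgroups"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)
```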

## Examples

### Create a new cgroup

This creates a new cgroup using a static path for all subsystems under `/test`.

* /sys/fs/cgroup/cpu/test
* /sys/fs/cgroup/memory/test
* etc....

It uses a single hierarchy and specifies cpu shares as a resource constraint and
uses the v1 implementation of cgroups.

```go
shares := uint64(100)
control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{
    CPU: &specs.LinuxCPU{
        Shares: &shares,
    },
})
defer control.Delete()
```

### Create with systemd slice support

```go
control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{
    CPU: &specs.LinuxCPU{
        Shares: &shares,
    },
})
```

### Load an existing cgroup

```go
control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
```

### Add a process to the cgroup

```go
if err := control.Add(cgroups.Process{Pid: 1234}); err != nil {
}
```

### Update the cgroup

To update the resources applied in the cgroup:

```go
shares = uint64(200)
if err := control.Update(&specs.LinuxResources{
    CPU: &specs.LinuxCPU{
        Shares: &shares,
    },
}); err != nil {
}
```

### Freeze and Thaw the cgroup

```go
if err := control.Freeze(); err != nil {
}
if err := control.Thaw(); err != nil {
}
```

### List all processes in the cgroup or recursively

```go
processes, err := control.Processes(cgroups.Devices, recursive)
```

### Get Stats on the cgroup

```go
stats, err := control.Stat()
```

By adding `cgroups.IgnoreNotExist`, all non-existent files will be ignored, e.g. swap memory stats without swap enabled:

```go
stats, err := control.Stat(cgroups.IgnoreNotExist)
```

### Move process across cgroups

This allows you to take processes from one cgroup and move them to another.

```go
err := control.MoveTo(destination)
```

### Create subcgroup

```go
subCgroup, err := control.New("child", resources)
```

### Registering for memory events

This allows you to get notified by an eventfd for v1 memory cgroups events.

```go
event := cgroups.MemoryThresholdEvent(50 * 1024 * 1024, false)
efd, err := control.RegisterMemoryEvent(event)
```

```go
event := cgroups.MemoryPressureEvent(cgroups.MediumPressure, cgroups.DefaultMode)
efd, err := control.RegisterMemoryEvent(event)
```

```go
efd, err := control.OOMEventFD()
// or by using RegisterMemoryEvent
event := cgroups.OOMEvent()
efd, err := control.RegisterMemoryEvent(event)
```
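The snippets above return a raw eventfd but stop there. A small, hedged sketch of draining that descriptor, assuming `golang.org/x/sys/unix` is available and `efd` holds the descriptor returned above (eventfds deliver an 8-byte counter per read):

```go
buf := make([]byte, 8) // eventfd reads are always 8 bytes
for {
    if _, err := unix.Read(int(efd), buf); err != nil {
        break // descriptor closed or the cgroup was removed
    }
    // each successful read signals one memory event (threshold crossed, OOM, ...)
}
```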

### Attention

Static paths should not include the `/sys/fs/cgroup/` prefix; they should start with your own cgroup's name.
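A short sketch restating the note above (the `/test` name is only an example): the path you pass is relative to each mounted controller, not an absolute filesystem path.

```go
// Correct: relative to the cgroup hierarchy root.
control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{})

// Incorrect: do not include the mount point itself.
// cgroups.StaticPath("/sys/fs/cgroup/cpu/test")
```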

## Project details

Cgroups is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:

* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
@@ -0,0 +1,46 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  # Fedora box is used for testing cgroup v2 support
  config.vm.box = "fedora/35-cloud-base"
  config.vm.provider :virtualbox do |v|
    v.memory = 4096
    v.cpus = 2
  end
  config.vm.provider :libvirt do |v|
    v.memory = 4096
    v.cpus = 2
  end
  config.vm.provision "shell", inline: <<-SHELL
    set -eux -o pipefail
    # configuration
    GO_VERSION="1.17.7"

    # install gcc and Golang
    dnf -y install gcc
    curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local

    # setup env vars
    cat >> /etc/profile.d/sh.local <<EOF
PATH=/usr/local/go/bin:$PATH
GO111MODULE=on
export PATH GO111MODULE
EOF
    source /etc/profile.d/sh.local

    # enter /root/go/src/github.com/containerd/cgroups
    mkdir -p /root/go/src/github.com/containerd
    ln -s /vagrant /root/go/src/github.com/containerd/cgroups
    cd /root/go/src/github.com/containerd/cgroups

    # create /test.sh
    cat > /test.sh <<EOF
#!/bin/bash
set -eux -o pipefail
cd /root/go/src/github.com/containerd/cgroups
go test -v ./...
EOF
    chmod +x /test.sh
  SHELL
end
@ -0,0 +1,361 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package cgroups
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
v1 "github.com/containerd/cgroups/stats/v1"
|
||||||
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewBlkio returns a Blkio controller given the root folder of cgroups.
|
||||||
|
// It may optionally accept other configuration options, such as ProcRoot(path)
|
||||||
|
func NewBlkio(root string, options ...func(controller *blkioController)) *blkioController {
|
||||||
|
ctrl := &blkioController{
|
||||||
|
root: filepath.Join(root, string(Blkio)),
|
||||||
|
procRoot: "/proc",
|
||||||
|
}
|
||||||
|
for _, opt := range options {
|
||||||
|
opt(ctrl)
|
||||||
|
}
|
||||||
|
return ctrl
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcRoot overrides the default location of the "/proc" filesystem
|
||||||
|
func ProcRoot(path string) func(controller *blkioController) {
|
||||||
|
return func(c *blkioController) {
|
||||||
|
c.procRoot = path
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type blkioController struct {
|
||||||
|
root string
|
||||||
|
procRoot string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blkioController) Name() Name {
|
||||||
|
return Blkio
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blkioController) Path(path string) string {
|
||||||
|
return filepath.Join(b.root, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blkioController) Create(path string, resources *specs.LinuxResources) error {
|
||||||
|
if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if resources.BlockIO == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for _, t := range createBlkioSettings(resources.BlockIO) {
|
||||||
|
if t.value != nil {
|
||||||
|
if err := retryingWriteFile(
|
||||||
|
filepath.Join(b.Path(path), "blkio."+t.name),
|
||||||
|
t.format(t.value),
|
||||||
|
defaultFilePerm,
|
||||||
|
); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blkioController) Update(path string, resources *specs.LinuxResources) error {
|
||||||
|
return b.Create(path, resources)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blkioController) Stat(path string, stats *v1.Metrics) error {
|
||||||
|
stats.Blkio = &v1.BlkIOStat{}
|
||||||
|
|
||||||
|
var settings []blkioStatSettings
|
||||||
|
|
||||||
|
// Try to read CFQ stats available on all CFQ enabled kernels first
|
||||||
|
if _, err := os.Lstat(filepath.Join(b.Path(path), "blkio.io_serviced_recursive")); err == nil {
|
||||||
|
settings = []blkioStatSettings{
|
||||||
|
{
|
||||||
|
name: "sectors_recursive",
|
||||||
|
entry: &stats.Blkio.SectorsRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "io_service_bytes_recursive",
|
||||||
|
entry: &stats.Blkio.IoServiceBytesRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "io_serviced_recursive",
|
||||||
|
entry: &stats.Blkio.IoServicedRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "io_queued_recursive",
|
||||||
|
entry: &stats.Blkio.IoQueuedRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "io_service_time_recursive",
|
||||||
|
entry: &stats.Blkio.IoServiceTimeRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "io_wait_time_recursive",
|
||||||
|
entry: &stats.Blkio.IoWaitTimeRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "io_merged_recursive",
|
||||||
|
entry: &stats.Blkio.IoMergedRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "time_recursive",
|
||||||
|
entry: &stats.Blkio.IoTimeRecursive,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(filepath.Join(b.procRoot, "partitions"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
devices, err := getDevices(f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var size int
|
||||||
|
for _, t := range settings {
|
||||||
|
if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
size += len(*t.entry)
|
||||||
|
}
|
||||||
|
if size > 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Even the kernel is compiled with the CFQ scheduler, the cgroup may not use
|
||||||
|
// block devices with the CFQ scheduler. If so, we should fallback to throttle.* files.
|
||||||
|
settings = []blkioStatSettings{
|
||||||
|
{
|
||||||
|
name: "throttle.io_serviced",
|
||||||
|
entry: &stats.Blkio.IoServicedRecursive,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "throttle.io_service_bytes",
|
||||||
|
entry: &stats.Blkio.IoServiceBytesRecursive,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, t := range settings {
|
||||||
|
if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*v1.BlkIOEntry) error {
|
||||||
|
f, err := os.Open(filepath.Join(b.Path(path), "blkio."+name))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
sc := bufio.NewScanner(f)
|
||||||
|
for sc.Scan() {
|
||||||
|
// format: dev type amount
|
||||||
|
fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine)
|
||||||
|
if len(fields) < 3 {
|
||||||
|
if len(fields) == 2 && fields[0] == "Total" {
|
||||||
|
// skip total line
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("invalid line found while parsing %s: %s", path, sc.Text())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
major, err := strconv.ParseUint(fields[0], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
minor, err := strconv.ParseUint(fields[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
op := ""
|
||||||
|
valueField := 2
|
||||||
|
if len(fields) == 4 {
|
||||||
|
op = fields[2]
|
||||||
|
valueField = 3
|
||||||
|
}
|
||||||
|
v, err := strconv.ParseUint(fields[valueField], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*entry = append(*entry, &v1.BlkIOEntry{
|
||||||
|
Device: devices[deviceKey{major, minor}],
|
||||||
|
Major: major,
|
||||||
|
Minor: minor,
|
||||||
|
Op: op,
|
||||||
|
Value: v,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return sc.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {
|
||||||
|
settings := []blkioSettings{}
|
||||||
|
|
||||||
|
if blkio.Weight != nil {
|
||||||
|
settings = append(settings,
|
||||||
|
blkioSettings{
|
||||||
|
name: "weight",
|
||||||
|
value: blkio.Weight,
|
||||||
|
format: uintf,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if blkio.LeafWeight != nil {
|
||||||
|
settings = append(settings,
|
||||||
|
blkioSettings{
|
||||||
|
name: "leaf_weight",
|
||||||
|
value: blkio.LeafWeight,
|
||||||
|
format: uintf,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, wd := range blkio.WeightDevice {
|
||||||
|
if wd.Weight != nil {
|
||||||
|
settings = append(settings,
|
||||||
|
blkioSettings{
|
||||||
|
name: "weight_device",
|
||||||
|
value: wd,
|
||||||
|
format: weightdev,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if wd.LeafWeight != nil {
|
||||||
|
settings = append(settings,
|
||||||
|
blkioSettings{
|
||||||
|
name: "leaf_weight_device",
|
||||||
|
value: wd,
|
||||||
|
format: weightleafdev,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, t := range []struct {
|
||||||
|
name string
|
||||||
|
list []specs.LinuxThrottleDevice
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "throttle.read_bps_device",
|
||||||
|
list: blkio.ThrottleReadBpsDevice,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "throttle.read_iops_device",
|
||||||
|
list: blkio.ThrottleReadIOPSDevice,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "throttle.write_bps_device",
|
||||||
|
list: blkio.ThrottleWriteBpsDevice,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "throttle.write_iops_device",
|
||||||
|
list: blkio.ThrottleWriteIOPSDevice,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
for _, td := range t.list {
|
||||||
|
settings = append(settings, blkioSettings{
|
||||||
|
name: t.name,
|
||||||
|
value: td,
|
||||||
|
format: throttleddev,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return settings
|
||||||
|
}
|
||||||
|
|
||||||
|
type blkioSettings struct {
|
||||||
|
name string
|
||||||
|
value interface{}
|
||||||
|
format func(v interface{}) []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type blkioStatSettings struct {
|
||||||
|
name string
|
||||||
|
entry *[]*v1.BlkIOEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
func uintf(v interface{}) []byte {
|
||||||
|
return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10))
|
||||||
|
}
|
||||||
|
|
||||||
|
func weightdev(v interface{}) []byte {
|
||||||
|
wd := v.(specs.LinuxWeightDevice)
|
||||||
|
return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.Weight))
|
||||||
|
}
|
||||||
|
|
||||||
|
func weightleafdev(v interface{}) []byte {
|
||||||
|
wd := v.(specs.LinuxWeightDevice)
|
||||||
|
return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.LeafWeight))
|
||||||
|
}
|
||||||
|
|
||||||
|
func throttleddev(v interface{}) []byte {
|
||||||
|
td := v.(specs.LinuxThrottleDevice)
|
||||||
|
return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate))
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitBlkIOStatLine(r rune) bool {
|
||||||
|
return r == ' ' || r == ':'
|
||||||
|
}
|
||||||
|
|
||||||
|
type deviceKey struct {
|
||||||
|
major, minor uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDevices makes a best effort attempt to read all the devices into a map
|
||||||
|
// keyed by major and minor number. Since devices may be mapped multiple times,
|
||||||
|
// we err on taking the first occurrence.
|
||||||
|
func getDevices(r io.Reader) (map[deviceKey]string, error) {
|
||||||
|
|
||||||
|
var (
|
||||||
|
s = bufio.NewScanner(r)
|
||||||
|
devices = make(map[deviceKey]string)
|
||||||
|
)
|
||||||
|
for i := 0; s.Scan(); i++ {
|
||||||
|
if i < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fields := strings.Fields(s.Text())
|
||||||
|
major, err := strconv.Atoi(fields[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
minor, err := strconv.Atoi(fields[1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
key := deviceKey{
|
||||||
|
major: uint64(major),
|
||||||
|
minor: uint64(minor),
|
||||||
|
}
|
||||||
|
if _, ok := devices[key]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
devices[key] = filepath.Join("/dev", fields[3])
|
||||||
|
}
|
||||||
|
return devices, s.Err()
|
||||||
|
}
|
|
@ -0,0 +1,533 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package cgroups
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
v1 "github.com/containerd/cgroups/stats/v1"
|
||||||
|
|
||||||
|
"github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// New returns a new control via the cgroup cgroups interface
|
||||||
|
func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources, opts ...InitOpts) (Cgroup, error) {
|
||||||
|
config := newInitConfig()
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o(config); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
subsystems, err := hierarchy()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var active []Subsystem
|
||||||
|
for _, s := range subsystems {
|
||||||
|
// check if subsystem exists
|
||||||
|
if err := initializeSubsystem(s, path, resources); err != nil {
|
||||||
|
if err == ErrControllerNotActive {
|
||||||
|
if config.InitCheck != nil {
|
||||||
|
if skerr := config.InitCheck(s, path, err); skerr != nil {
|
||||||
|
if skerr != ErrIgnoreSubsystem {
|
||||||
|
return nil, skerr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
active = append(active, s)
|
||||||
|
}
|
||||||
|
return &cgroup{
|
||||||
|
path: path,
|
||||||
|
subsystems: active,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load will load an existing cgroup and allow it to be controlled
|
||||||
|
// All static path should not include `/sys/fs/cgroup/` prefix, it should start with your own cgroups name
|
||||||
|
func Load(hierarchy Hierarchy, path Path, opts ...InitOpts) (Cgroup, error) {
|
||||||
|
config := newInitConfig()
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o(config); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var activeSubsystems []Subsystem
|
||||||
|
subsystems, err := hierarchy()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// check that the subsystems still exist, and keep only those that actually exist
|
||||||
|
for _, s := range pathers(subsystems) {
|
||||||
|
p, err := path(s.Name())
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil, ErrCgroupDeleted
|
||||||
|
}
|
||||||
|
if err == ErrControllerNotActive {
|
||||||
|
if config.InitCheck != nil {
|
||||||
|
if skerr := config.InitCheck(s, path, err); skerr != nil {
|
||||||
|
if skerr != ErrIgnoreSubsystem {
|
||||||
|
return nil, skerr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := os.Lstat(s.Path(p)); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
activeSubsystems = append(activeSubsystems, s)
|
||||||
|
}
|
||||||
|
// if we do not have any active systems then the cgroup is deleted
|
||||||
|
if len(activeSubsystems) == 0 {
|
||||||
|
return nil, ErrCgroupDeleted
|
||||||
|
}
|
||||||
|
return &cgroup{
|
||||||
|
path: path,
|
||||||
|
subsystems: activeSubsystems,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type cgroup struct {
|
||||||
|
path Path
|
||||||
|
|
||||||
|
subsystems []Subsystem
|
||||||
|
mu sync.Mutex
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new sub cgroup
|
||||||
|
func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
if c.err != nil {
|
||||||
|
return nil, c.err
|
||||||
|
}
|
||||||
|
path := subPath(c.path, name)
|
||||||
|
for _, s := range c.subsystems {
|
||||||
|
if err := initializeSubsystem(s, path, resources); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &cgroup{
|
||||||
|
path: path,
|
||||||
|
subsystems: c.subsystems,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subsystems returns all the subsystems that are currently being
|
||||||
|
// consumed by the group
|
||||||
|
func (c *cgroup) Subsystems() []Subsystem {
|
||||||
|
return c.subsystems
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cgroup) subsystemsFilter(subsystems ...Name) []Subsystem {
|
||||||
|
if len(subsystems) == 0 {
|
||||||
|
return c.subsystems
|
||||||
|
}
|
||||||
|
|
||||||
|
var filteredSubsystems = []Subsystem{}
|
||||||
|
for _, s := range c.subsystems {
|
||||||
|
for _, f := range subsystems {
|
||||||
|
if s.Name() == f {
|
||||||
|
filteredSubsystems = append(filteredSubsystems, s)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return filteredSubsystems
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add moves the provided process into the new cgroup.
|
||||||
|
// Without additional arguments, the process is added to all the cgroup subsystems.
|
||||||
|
// When giving Add a list of subsystem names, the process is only added to those
|
||||||
|
// subsystems, provided that they are active in the targeted cgroup.
|
||||||
func (c *cgroup) Add(process Process, subsystems ...Name) error {
	return c.add(process, cgroupProcs, subsystems...)
}

// AddProc moves the provided process id into the new cgroup.
// Without additional arguments, the process with the given id is added to all
// the cgroup subsystems. When giving AddProc a list of subsystem names, the process
// id is only added to those subsystems, provided that they are active in the targeted
// cgroup.
func (c *cgroup) AddProc(pid uint64, subsystems ...Name) error {
	return c.add(Process{Pid: int(pid)}, cgroupProcs, subsystems...)
}

// AddTask moves the provided tasks (threads) into the new cgroup.
// Without additional arguments, the task is added to all the cgroup subsystems.
// When giving AddTask a list of subsystem names, the task is only added to those
// subsystems, provided that they are active in the targeted cgroup.
func (c *cgroup) AddTask(process Process, subsystems ...Name) error {
	return c.add(process, cgroupTasks, subsystems...)
}

func (c *cgroup) add(process Process, pType procType, subsystems ...Name) error {
	if process.Pid <= 0 {
		return ErrInvalidPid
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return c.err
	}
	for _, s := range pathers(c.subsystemsFilter(subsystems...)) {
		p, err := c.path(s.Name())
		if err != nil {
			return err
		}
		err = retryingWriteFile(
			filepath.Join(s.Path(p), pType),
			[]byte(strconv.Itoa(process.Pid)),
			defaultFilePerm,
		)
		if err != nil {
			return err
		}
	}
	return nil
}

// Delete will remove the control group from each of the subsystems registered
func (c *cgroup) Delete() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return c.err
	}
	var errs []string
	for _, s := range c.subsystems {
		if d, ok := s.(deleter); ok {
			sp, err := c.path(s.Name())
			if err != nil {
				return err
			}
			if err := d.Delete(sp); err != nil {
				errs = append(errs, string(s.Name()))
			}
			continue
		}
		if p, ok := s.(pather); ok {
			sp, err := c.path(s.Name())
			if err != nil {
				return err
			}
			path := p.Path(sp)
			if err := remove(path); err != nil {
				errs = append(errs, path)
			}
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errs, ", "))
	}
	c.err = ErrCgroupDeleted
	return nil
}

// Stat returns the current metrics for the cgroup
func (c *cgroup) Stat(handlers ...ErrorHandler) (*v1.Metrics, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return nil, c.err
	}
	if len(handlers) == 0 {
		handlers = append(handlers, errPassthrough)
	}
	var (
		stats = &v1.Metrics{
			CPU: &v1.CPUStat{
				Throttling: &v1.Throttle{},
				Usage:      &v1.CPUUsage{},
			},
		}
		wg   = &sync.WaitGroup{}
		errs = make(chan error, len(c.subsystems))
	)
	for _, s := range c.subsystems {
		if ss, ok := s.(stater); ok {
			sp, err := c.path(s.Name())
			if err != nil {
				return nil, err
			}
			wg.Add(1)
			go func() {
				defer wg.Done()
				if err := ss.Stat(sp, stats); err != nil {
					for _, eh := range handlers {
						if herr := eh(err); herr != nil {
							errs <- herr
						}
					}
				}
			}()
		}
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		return nil, err
	}
	return stats, nil
}

// Update updates the cgroup with the new resource values provided
//
// Be prepared to handle EBUSY when trying to update a cgroup with
// live processes and other operations like Stats being performed at the
// same time
func (c *cgroup) Update(resources *specs.LinuxResources) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return c.err
	}
	for _, s := range c.subsystems {
		if u, ok := s.(updater); ok {
			sp, err := c.path(s.Name())
			if err != nil {
				return err
			}
			if err := u.Update(sp, resources); err != nil {
				return err
			}
		}
	}
	return nil
}

// Processes returns the processes running inside the cgroup along
// with the subsystem used, pid, and path
func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return nil, c.err
	}
	return c.processes(subsystem, recursive, cgroupProcs)
}

// Tasks returns the tasks running inside the cgroup along
// with the subsystem used, pid, and path
func (c *cgroup) Tasks(subsystem Name, recursive bool) ([]Task, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return nil, c.err
	}
	return c.processes(subsystem, recursive, cgroupTasks)
}

func (c *cgroup) processes(subsystem Name, recursive bool, pType procType) ([]Process, error) {
	s := c.getSubsystem(subsystem)
	sp, err := c.path(subsystem)
	if err != nil {
		return nil, err
	}
	if s == nil {
		return nil, fmt.Errorf("cgroups: %s doesn't exist in %s subsystem", sp, subsystem)
	}
	path := s.(pather).Path(sp)
	var processes []Process
	err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !recursive && info.IsDir() {
			if p == path {
				return nil
			}
			return filepath.SkipDir
		}
		dir, name := filepath.Split(p)
		if name != pType {
			return nil
		}
		procs, err := readPids(dir, subsystem, pType)
		if err != nil {
			return err
		}
		processes = append(processes, procs...)
		return nil
	})
	return processes, err
}

// Freeze freezes the entire cgroup and all the processes inside it
func (c *cgroup) Freeze() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return c.err
	}
	s := c.getSubsystem(Freezer)
	if s == nil {
		return ErrFreezerNotSupported
	}
	sp, err := c.path(Freezer)
	if err != nil {
		return err
	}
	return s.(*freezerController).Freeze(sp)
}

// Thaw thaws out the cgroup and all the processes inside it
func (c *cgroup) Thaw() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return c.err
	}
	s := c.getSubsystem(Freezer)
	if s == nil {
		return ErrFreezerNotSupported
	}
	sp, err := c.path(Freezer)
	if err != nil {
		return err
	}
	return s.(*freezerController).Thaw(sp)
}

// OOMEventFD returns the memory cgroup's out of memory event fd that triggers
// when processes inside the cgroup receive an oom event. Returns
// ErrMemoryNotSupported if memory cgroups is not supported.
func (c *cgroup) OOMEventFD() (uintptr, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return 0, c.err
	}
	s := c.getSubsystem(Memory)
	if s == nil {
		return 0, ErrMemoryNotSupported
	}
	sp, err := c.path(Memory)
	if err != nil {
		return 0, err
	}
	return s.(*memoryController).memoryEvent(sp, OOMEvent())
}

// RegisterMemoryEvent allows the ability to register for all v1 memory cgroups
// notifications.
func (c *cgroup) RegisterMemoryEvent(event MemoryEvent) (uintptr, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return 0, c.err
	}
	s := c.getSubsystem(Memory)
	if s == nil {
		return 0, ErrMemoryNotSupported
	}
	sp, err := c.path(Memory)
	if err != nil {
		return 0, err
	}
	return s.(*memoryController).memoryEvent(sp, event)
}

// State returns the state of the cgroup and its processes
func (c *cgroup) State() State {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.checkExists()
	if c.err != nil && c.err == ErrCgroupDeleted {
		return Deleted
	}
	s := c.getSubsystem(Freezer)
	if s == nil {
		return Thawed
	}
	sp, err := c.path(Freezer)
	if err != nil {
		return Unknown
	}
	state, err := s.(*freezerController).state(sp)
	if err != nil {
		return Unknown
	}
	return state
}

// MoveTo does a recursive move subsystem by subsystem of all the processes
// inside the group
func (c *cgroup) MoveTo(destination Cgroup) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return c.err
	}
	for _, s := range c.subsystems {
		processes, err := c.processes(s.Name(), true, cgroupProcs)
		if err != nil {
			return err
		}
		for _, p := range processes {
			if err := destination.Add(p); err != nil {
				if strings.Contains(err.Error(), "no such process") {
					continue
				}
				return err
			}
		}
	}
	return nil
}

func (c *cgroup) getSubsystem(n Name) Subsystem {
	for _, s := range c.subsystems {
		if s.Name() == n {
			return s
		}
	}
	return nil
}

func (c *cgroup) checkExists() {
	for _, s := range pathers(c.subsystems) {
		p, err := c.path(s.Name())
		if err != nil {
			return
		}
		if _, err := os.Lstat(s.Path(p)); err != nil {
			if os.IsNotExist(err) {
				c.err = ErrCgroupDeleted
				return
			}
		}
	}
}
@ -0,0 +1,99 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"os"

	v1 "github.com/containerd/cgroups/stats/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

type procType = string

const (
	cgroupProcs    procType = "cgroup.procs"
	cgroupTasks    procType = "tasks"
	defaultDirPerm          = 0755
)

// defaultFilePerm is a var so that the test framework can change the filemode
// of all files created when the tests are running. The difference between the
// tests and real world use is that files like "cgroup.procs" will exist when writing
// to a read cgroup filesystem and do not exist prior when running in the tests.
// this is set to a non 0 value in the test code
var defaultFilePerm = os.FileMode(0)

type Process struct {
	// Subsystem is the name of the subsystem that the process / task is in.
	Subsystem Name
	// Pid is the process id of the process / task.
	Pid int
	// Path is the full path of the subsystem and location that the process / task is in.
	Path string
}

type Task = Process

// Cgroup handles interactions with the individual groups to perform
// actions on them as them main interface to this cgroup package
type Cgroup interface {
	// New creates a new cgroup under the calling cgroup
	New(string, *specs.LinuxResources) (Cgroup, error)
	// Add adds a process to the cgroup (cgroup.procs). Without additional arguments,
	// the process is added to all the cgroup subsystems. When giving Add a list of
	// subsystem names, the process is only added to those subsystems, provided that
	// they are active in the targeted cgroup.
	Add(Process, ...Name) error
	// AddProc adds the process with the given id to the cgroup (cgroup.procs).
	// Without additional arguments, the process with the given id is added to all
	// the cgroup subsystems. When giving AddProc a list of subsystem names, the process
	// id is only added to those subsystems, provided that they are active in the targeted
	// cgroup.
	AddProc(uint64, ...Name) error
	// AddTask adds a process to the cgroup (tasks). Without additional arguments, the
	// task is added to all the cgroup subsystems. When giving AddTask a list of subsystem
	// names, the task is only added to those subsystems, provided that they are active in
	// the targeted cgroup.
	AddTask(Process, ...Name) error
	// Delete removes the cgroup as a whole
	Delete() error
	// MoveTo moves all the processes under the calling cgroup to the provided one
	// subsystems are moved one at a time
	MoveTo(Cgroup) error
	// Stat returns the stats for all subsystems in the cgroup
	Stat(...ErrorHandler) (*v1.Metrics, error)
	// Update updates all the subsystems with the provided resource changes
	Update(resources *specs.LinuxResources) error
	// Processes returns all the processes in a select subsystem for the cgroup
	Processes(Name, bool) ([]Process, error)
	// Tasks returns all the tasks in a select subsystem for the cgroup
	Tasks(Name, bool) ([]Task, error)
	// Freeze freezes or pauses all processes inside the cgroup
	Freeze() error
	// Thaw thaw or resumes all processes inside the cgroup
	Thaw() error
	// OOMEventFD returns the memory subsystem's event fd for OOM events
	OOMEventFD() (uintptr, error)
	// RegisterMemoryEvent returns the memory subsystems event fd for whatever memory event was
	// registered for. Can alternatively register for the oom event with this method.
	RegisterMemoryEvent(MemoryEvent) (uintptr, error)
	// State returns the cgroups current state
	State() State
	// Subsystems returns all the subsystems in the cgroup
	Subsystems() []Subsystem
}
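For orientation, a minimal sketch of how this vendored interface is typically consumed. The constructors used here (Load, V1, StaticPath, and the IgnoreNotExist handler) come from other files of the vendored package, not from this hunk, and the group path is made up for illustration.

	package main

	import (
		"fmt"

		"github.com/containerd/cgroups"
	)

	func main() {
		// Attach to an existing v1 cgroup by its static path (illustrative path).
		control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/mygroup"))
		if err != nil {
			panic(err)
		}
		// Gather metrics from every subsystem, tolerating stat files that no longer exist.
		metrics, err := control.Stat(cgroups.IgnoreNotExist)
		if err != nil {
			panic(err)
		}
		fmt.Println(metrics.CPU.Usage.Total)
	}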
@ -0,0 +1,125 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"bufio"
	"os"
	"path/filepath"
	"strconv"

	v1 "github.com/containerd/cgroups/stats/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func NewCpu(root string) *cpuController {
	return &cpuController{
		root: filepath.Join(root, string(Cpu)),
	}
}

type cpuController struct {
	root string
}

func (c *cpuController) Name() Name {
	return Cpu
}

func (c *cpuController) Path(path string) string {
	return filepath.Join(c.root, path)
}

func (c *cpuController) Create(path string, resources *specs.LinuxResources) error {
	if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
		return err
	}
	if cpu := resources.CPU; cpu != nil {
		for _, t := range []struct {
			name   string
			ivalue *int64
			uvalue *uint64
		}{
			{
				name:   "rt_period_us",
				uvalue: cpu.RealtimePeriod,
			},
			{
				name:   "rt_runtime_us",
				ivalue: cpu.RealtimeRuntime,
			},
			{
				name:   "shares",
				uvalue: cpu.Shares,
			},
			{
				name:   "cfs_period_us",
				uvalue: cpu.Period,
			},
			{
				name:   "cfs_quota_us",
				ivalue: cpu.Quota,
			},
		} {
			var value []byte
			if t.uvalue != nil {
				value = []byte(strconv.FormatUint(*t.uvalue, 10))
			} else if t.ivalue != nil {
				value = []byte(strconv.FormatInt(*t.ivalue, 10))
			}
			if value != nil {
				if err := retryingWriteFile(
					filepath.Join(c.Path(path), "cpu."+t.name),
					value,
					defaultFilePerm,
				); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

func (c *cpuController) Update(path string, resources *specs.LinuxResources) error {
	return c.Create(path, resources)
}

func (c *cpuController) Stat(path string, stats *v1.Metrics) error {
	f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat"))
	if err != nil {
		return err
	}
	defer f.Close()
	// get or create the cpu field because cpuacct can also set values on this struct
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		key, v, err := parseKV(sc.Text())
		if err != nil {
			return err
		}
		switch key {
		case "nr_periods":
			stats.CPU.Throttling.Periods = v
		case "nr_throttled":
			stats.CPU.Throttling.ThrottledPeriods = v
		case "throttled_time":
			stats.CPU.Throttling.ThrottledTime = v
		}
	}
	return sc.Err()
}
@ -0,0 +1,123 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"

	v1 "github.com/containerd/cgroups/stats/v1"
)

const nanosecondsInSecond = 1000000000

var clockTicks = getClockTicks()

func NewCpuacct(root string) *cpuacctController {
	return &cpuacctController{
		root: filepath.Join(root, string(Cpuacct)),
	}
}

type cpuacctController struct {
	root string
}

func (c *cpuacctController) Name() Name {
	return Cpuacct
}

func (c *cpuacctController) Path(path string) string {
	return filepath.Join(c.root, path)
}

func (c *cpuacctController) Stat(path string, stats *v1.Metrics) error {
	user, kernel, err := c.getUsage(path)
	if err != nil {
		return err
	}
	total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage"))
	if err != nil {
		return err
	}
	percpu, err := c.percpuUsage(path)
	if err != nil {
		return err
	}
	stats.CPU.Usage.Total = total
	stats.CPU.Usage.User = user
	stats.CPU.Usage.Kernel = kernel
	stats.CPU.Usage.PerCPU = percpu
	return nil
}

func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
	var usage []uint64
	data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
	if err != nil {
		return nil, err
	}
	for _, v := range strings.Fields(string(data)) {
		u, err := strconv.ParseUint(v, 10, 64)
		if err != nil {
			return nil, err
		}
		usage = append(usage, u)
	}
	return usage, nil
}

func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) {
	statPath := filepath.Join(c.Path(path), "cpuacct.stat")
	data, err := ioutil.ReadFile(statPath)
	if err != nil {
		return 0, 0, err
	}
	fields := strings.Fields(string(data))
	if len(fields) != 4 {
		return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath)
	}
	for _, t := range []struct {
		index int
		name  string
		value *uint64
	}{
		{
			index: 0,
			name:  "user",
			value: &user,
		},
		{
			index: 2,
			name:  "system",
			value: &kernel,
		},
	} {
		if fields[t.index] != t.name {
			return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath)
		}
		v, err := strconv.ParseUint(fields[t.index+1], 10, 64)
		if err != nil {
			return 0, 0, err
		}
		*t.value = v
	}
	return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil
}
@ -0,0 +1,159 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func NewCpuset(root string) *cpusetController {
	return &cpusetController{
		root: filepath.Join(root, string(Cpuset)),
	}
}

type cpusetController struct {
	root string
}

func (c *cpusetController) Name() Name {
	return Cpuset
}

func (c *cpusetController) Path(path string) string {
	return filepath.Join(c.root, path)
}

func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error {
	if err := c.ensureParent(c.Path(path), c.root); err != nil {
		return err
	}
	if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
		return err
	}
	if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil {
		return err
	}
	if resources.CPU != nil {
		for _, t := range []struct {
			name  string
			value string
		}{
			{
				name:  "cpus",
				value: resources.CPU.Cpus,
			},
			{
				name:  "mems",
				value: resources.CPU.Mems,
			},
		} {
			if t.value != "" {
				if err := retryingWriteFile(
					filepath.Join(c.Path(path), "cpuset."+t.name),
					[]byte(t.value),
					defaultFilePerm,
				); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

func (c *cpusetController) Update(path string, resources *specs.LinuxResources) error {
	return c.Create(path, resources)
}

func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) {
	if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
		return
	}
	if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
		return
	}
	return cpus, mems, nil
}

// ensureParent makes sure that the parent directory of current is created
// and populated with the proper cpus and mems files copied from
// it's parent.
func (c *cpusetController) ensureParent(current, root string) error {
	parent := filepath.Dir(current)
	if _, err := filepath.Rel(root, parent); err != nil {
		return nil
	}
	// Avoid infinite recursion.
	if parent == current {
		return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
	}
	if cleanPath(parent) != root {
		if err := c.ensureParent(parent, root); err != nil {
			return err
		}
	}
	if err := os.MkdirAll(current, defaultDirPerm); err != nil {
		return err
	}
	return c.copyIfNeeded(current, parent)
}

// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
// directory to the current directory if the file's contents are 0
func (c *cpusetController) copyIfNeeded(current, parent string) error {
	var (
		err                      error
		currentCpus, currentMems []byte
		parentCpus, parentMems   []byte
	)
	if currentCpus, currentMems, err = c.getValues(current); err != nil {
		return err
	}
	if parentCpus, parentMems, err = c.getValues(parent); err != nil {
		return err
	}
	if isEmpty(currentCpus) {
		if err := retryingWriteFile(
			filepath.Join(current, "cpuset.cpus"),
			parentCpus,
			defaultFilePerm,
		); err != nil {
			return err
		}
	}
	if isEmpty(currentMems) {
		if err := retryingWriteFile(
			filepath.Join(current, "cpuset.mems"),
			parentMems,
			defaultFilePerm,
		); err != nil {
			return err
		}
	}
	return nil
}

func isEmpty(b []byte) bool {
	return len(bytes.Trim(b, "\n")) == 0
}
@ -0,0 +1,92 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"fmt"
	"os"
	"path/filepath"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

const (
	allowDeviceFile = "devices.allow"
	denyDeviceFile  = "devices.deny"
	wildcard        = -1
)

func NewDevices(root string) *devicesController {
	return &devicesController{
		root: filepath.Join(root, string(Devices)),
	}
}

type devicesController struct {
	root string
}

func (d *devicesController) Name() Name {
	return Devices
}

func (d *devicesController) Path(path string) string {
	return filepath.Join(d.root, path)
}

func (d *devicesController) Create(path string, resources *specs.LinuxResources) error {
	if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil {
		return err
	}
	for _, device := range resources.Devices {
		file := denyDeviceFile
		if device.Allow {
			file = allowDeviceFile
		}
		if device.Type == "" {
			device.Type = "a"
		}
		if err := retryingWriteFile(
			filepath.Join(d.Path(path), file),
			[]byte(deviceString(device)),
			defaultFilePerm,
		); err != nil {
			return err
		}
	}
	return nil
}

func (d *devicesController) Update(path string, resources *specs.LinuxResources) error {
	return d.Create(path, resources)
}

func deviceString(device specs.LinuxDeviceCgroup) string {
	return fmt.Sprintf("%s %s:%s %s",
		device.Type,
		deviceNumber(device.Major),
		deviceNumber(device.Minor),
		device.Access,
	)
}

func deviceNumber(number *int64) string {
	if number == nil || *number == wildcard {
		return "*"
	}
	return fmt.Sprint(*number)
}
@ -0,0 +1,47 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"errors"
	"os"
)

var (
	ErrInvalidPid               = errors.New("cgroups: pid must be greater than 0")
	ErrMountPointNotExist       = errors.New("cgroups: cgroup mountpoint does not exist")
	ErrInvalidFormat            = errors.New("cgroups: parsing file with invalid format failed")
	ErrFreezerNotSupported      = errors.New("cgroups: freezer cgroup not supported on this system")
	ErrMemoryNotSupported       = errors.New("cgroups: memory cgroup not supported on this system")
	ErrCgroupDeleted            = errors.New("cgroups: cgroup deleted")
	ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination")
)

// ErrorHandler is a function that handles and acts on errors
type ErrorHandler func(err error) error

// IgnoreNotExist ignores any errors that are for not existing files
func IgnoreNotExist(err error) error {
	if os.IsNotExist(err) {
		return nil
	}
	return err
}

func errPassthrough(err error) error {
	return err
}
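The ErrorHandler hook defined here is what Stat consults when a per-subsystem read fails. A hedged sketch of passing the provided IgnoreNotExist handler, where control is a Cgroup as in the earlier sketch:

	// Tolerate stat files that disappear mid-read, e.g. while the cgroup is being torn down.
	metrics, err := control.Stat(cgroups.IgnoreNotExist)
	if err != nil {
		return err
	}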
@ -0,0 +1,82 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"io/ioutil"
	"path/filepath"
	"strings"
	"time"
)

func NewFreezer(root string) *freezerController {
	return &freezerController{
		root: filepath.Join(root, string(Freezer)),
	}
}

type freezerController struct {
	root string
}

func (f *freezerController) Name() Name {
	return Freezer
}

func (f *freezerController) Path(path string) string {
	return filepath.Join(f.root, path)
}

func (f *freezerController) Freeze(path string) error {
	return f.waitState(path, Frozen)
}

func (f *freezerController) Thaw(path string) error {
	return f.waitState(path, Thawed)
}

func (f *freezerController) changeState(path string, state State) error {
	return retryingWriteFile(
		filepath.Join(f.root, path, "freezer.state"),
		[]byte(strings.ToUpper(string(state))),
		defaultFilePerm,
	)
}

func (f *freezerController) state(path string) (State, error) {
	current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state"))
	if err != nil {
		return "", err
	}
	return State(strings.ToLower(strings.TrimSpace(string(current)))), nil
}

func (f *freezerController) waitState(path string, state State) error {
	for {
		if err := f.changeState(path, state); err != nil {
			return err
		}
		current, err := f.state(path)
		if err != nil {
			return err
		}
		if current == state {
			return nil
		}
		time.Sleep(1 * time.Millisecond)
	}
}
@ -0,0 +1,20 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

// Hierarchy enables both unified and split hierarchy for cgroups
type Hierarchy func() ([]Subsystem, error)
@ -0,0 +1,109 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"os"
	"path/filepath"
	"strconv"
	"strings"

	v1 "github.com/containerd/cgroups/stats/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func NewHugetlb(root string) (*hugetlbController, error) {
	sizes, err := hugePageSizes()
	if err != nil {
		return nil, err
	}

	return &hugetlbController{
		root:  filepath.Join(root, string(Hugetlb)),
		sizes: sizes,
	}, nil
}

type hugetlbController struct {
	root  string
	sizes []string
}

func (h *hugetlbController) Name() Name {
	return Hugetlb
}

func (h *hugetlbController) Path(path string) string {
	return filepath.Join(h.root, path)
}

func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error {
	if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil {
		return err
	}
	for _, limit := range resources.HugepageLimits {
		if err := retryingWriteFile(
			filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")),
			[]byte(strconv.FormatUint(limit.Limit, 10)),
			defaultFilePerm,
		); err != nil {
			return err
		}
	}
	return nil
}

func (h *hugetlbController) Stat(path string, stats *v1.Metrics) error {
	for _, size := range h.sizes {
		s, err := h.readSizeStat(path, size)
		if err != nil {
			return err
		}
		stats.Hugetlb = append(stats.Hugetlb, s)
	}
	return nil
}

func (h *hugetlbController) readSizeStat(path, size string) (*v1.HugetlbStat, error) {
	s := v1.HugetlbStat{
		Pagesize: size,
	}
	for _, t := range []struct {
		name  string
		value *uint64
	}{
		{
			name:  "usage_in_bytes",
			value: &s.Usage,
		},
		{
			name:  "max_usage_in_bytes",
			value: &s.Max,
		},
		{
			name:  "failcnt",
			value: &s.Failcnt,
		},
	} {
		v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, ".")))
		if err != nil {
			return nil, err
		}
		*t.value = v
	}
	return &s, nil
}
@ -0,0 +1,480 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package cgroups
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
v1 "github.com/containerd/cgroups/stats/v1"
|
||||||
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MemoryEvent is an interface that V1 memory Cgroup notifications implement. Arg returns the
|
||||||
|
// file name whose fd should be written to "cgroups.event_control". EventFile returns the name of
|
||||||
|
// the file that supports the notification api e.g. "memory.usage_in_bytes".
|
||||||
|
type MemoryEvent interface {
|
||||||
|
Arg() string
|
||||||
|
EventFile() string
|
||||||
|
}
|
||||||
|
|
||||||
|
type memoryThresholdEvent struct {
|
||||||
|
threshold uint64
|
||||||
|
swap bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// MemoryThresholdEvent returns a new memory threshold event to be used with RegisterMemoryEvent.
|
||||||
|
// If swap is true, the event will be registered using memory.memsw.usage_in_bytes
|
||||||
|
func MemoryThresholdEvent(threshold uint64, swap bool) MemoryEvent {
|
||||||
|
return &memoryThresholdEvent{
|
||||||
|
threshold,
|
||||||
|
swap,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryThresholdEvent) Arg() string {
|
||||||
|
return strconv.FormatUint(m.threshold, 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryThresholdEvent) EventFile() string {
|
||||||
|
if m.swap {
|
||||||
|
return "memory.memsw.usage_in_bytes"
|
||||||
|
}
|
||||||
|
return "memory.usage_in_bytes"
|
||||||
|
}
|
||||||
|
|
||||||
|
type oomEvent struct{}
|
||||||
|
|
||||||
|
// OOMEvent returns a new oom event to be used with RegisterMemoryEvent.
|
||||||
|
func OOMEvent() MemoryEvent {
|
||||||
|
return &oomEvent{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (oom *oomEvent) Arg() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (oom *oomEvent) EventFile() string {
|
||||||
|
return "memory.oom_control"
|
||||||
|
}
|
||||||
|
|
||||||
|
type memoryPressureEvent struct {
|
||||||
|
pressureLevel MemoryPressureLevel
|
||||||
|
hierarchy EventNotificationMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// MemoryPressureEvent returns a new memory pressure event to be used with RegisterMemoryEvent.
|
||||||
|
func MemoryPressureEvent(pressureLevel MemoryPressureLevel, hierarchy EventNotificationMode) MemoryEvent {
|
||||||
|
return &memoryPressureEvent{
|
||||||
|
pressureLevel,
|
||||||
|
hierarchy,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryPressureEvent) Arg() string {
|
||||||
|
return string(m.pressureLevel) + "," + string(m.hierarchy)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryPressureEvent) EventFile() string {
|
||||||
|
return "memory.pressure_level"
|
||||||
|
}
|
||||||
|
|
||||||
|
// MemoryPressureLevel corresponds to the memory pressure levels defined
|
||||||
|
// for memory cgroups.
|
||||||
|
type MemoryPressureLevel string
|
||||||
|
|
||||||
|
// The three memory pressure levels are as follows.
|
||||||
|
// - The "low" level means that the system is reclaiming memory for new
|
||||||
|
// allocations. Monitoring this reclaiming activity might be useful for
|
||||||
|
// maintaining cache level. Upon notification, the program (typically
|
||||||
|
// "Activity Manager") might analyze vmstat and act in advance (i.e.
|
||||||
|
// prematurely shutdown unimportant services).
|
||||||
|
// - The "medium" level means that the system is experiencing medium memory
|
||||||
|
// pressure, the system might be making swap, paging out active file caches,
|
||||||
|
// etc. Upon this event applications may decide to further analyze
|
||||||
|
// vmstat/zoneinfo/memcg or internal memory usage statistics and free any
|
||||||
|
// resources that can be easily reconstructed or re-read from a disk.
|
||||||
|
// - The "critical" level means that the system is actively thrashing, it is
|
||||||
|
// about to out of memory (OOM) or even the in-kernel OOM killer is on its
|
||||||
|
// way to trigger. Applications should do whatever they can to help the
|
||||||
|
// system. It might be too late to consult with vmstat or any other
|
||||||
|
// statistics, so it is advisable to take an immediate action.
|
||||||
|
// "https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt" Section 11
|
||||||
|
const (
|
||||||
|
LowPressure MemoryPressureLevel = "low"
|
||||||
|
MediumPressure MemoryPressureLevel = "medium"
|
||||||
|
CriticalPressure MemoryPressureLevel = "critical"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EventNotificationMode corresponds to the notification modes
|
||||||
|
// for the memory cgroups pressure level notifications.
|
||||||
|
type EventNotificationMode string
|
||||||
|
|
||||||
|
// There are three optional modes that specify different propagation behavior:
|
||||||
|
// - "default": this is the default behavior specified above. This mode is the
|
||||||
|
// same as omitting the optional mode parameter, preserved by backwards
|
||||||
|
// compatibility.
|
||||||
|
// - "hierarchy": events always propagate up to the root, similar to the default
|
||||||
|
// behavior, except that propagation continues regardless of whether there are
|
||||||
|
// event listeners at each level, with the "hierarchy" mode. In the above
|
||||||
|
// example, groups A, B, and C will receive notification of memory pressure.
|
||||||
|
// - "local": events are pass-through, i.e. they only receive notifications when
|
||||||
|
// memory pressure is experienced in the memcg for which the notification is
|
||||||
|
// registered. In the above example, group C will receive notification if
|
||||||
|
// registered for "local" notification and the group experiences memory
|
||||||
|
// pressure. However, group B will never receive notification, regardless if
|
||||||
|
// there is an event listener for group C or not, if group B is registered for
|
||||||
|
// local notification.
|
||||||
|
// "https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt" Section 11
|
||||||
|
const (
|
||||||
|
DefaultMode EventNotificationMode = "default"
|
||||||
|
LocalMode EventNotificationMode = "local"
|
||||||
|
HierarchyMode EventNotificationMode = "hierarchy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewMemory returns a Memory controller given the root folder of cgroups.
|
||||||
|
// It may optionally accept other configuration options, such as IgnoreModules(...)
|
||||||
|
func NewMemory(root string, options ...func(*memoryController)) *memoryController {
|
||||||
|
mc := &memoryController{
|
||||||
|
root: filepath.Join(root, string(Memory)),
|
||||||
|
ignored: map[string]struct{}{},
|
||||||
|
}
|
||||||
|
for _, opt := range options {
|
||||||
|
opt(mc)
|
||||||
|
}
|
||||||
|
return mc
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreModules configure the memory controller to not read memory metrics for some
|
||||||
|
// module names (e.g. passing "memsw" would avoid all the memory.memsw.* entries)
|
||||||
|
func IgnoreModules(names ...string) func(*memoryController) {
|
||||||
|
return func(mc *memoryController) {
|
||||||
|
for _, name := range names {
|
||||||
|
mc.ignored[name] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OptionalSwap allows the memory controller to not fail if cgroups is not accounting
|
||||||
|
// Swap memory (there are no memory.memsw.* entries)
|
||||||
|
func OptionalSwap() func(*memoryController) {
|
||||||
|
return func(mc *memoryController) {
|
||||||
|
_, err := os.Stat(filepath.Join(mc.root, "memory.memsw.usage_in_bytes"))
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
mc.ignored["memsw"] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type memoryController struct {
|
||||||
|
root string
|
||||||
|
ignored map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryController) Name() Name {
|
||||||
|
return Memory
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryController) Path(path string) string {
|
||||||
|
return filepath.Join(m.root, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryController) Create(path string, resources *specs.LinuxResources) error {
|
||||||
|
if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if resources.Memory == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return m.set(path, getMemorySettings(resources))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryController) Update(path string, resources *specs.LinuxResources) error {
|
||||||
|
if resources.Memory == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
g := func(v *int64) bool {
|
||||||
|
return v != nil && *v > 0
|
||||||
|
}
|
||||||
|
settings := getMemorySettings(resources)
|
||||||
|
if g(resources.Memory.Limit) && g(resources.Memory.Swap) {
|
||||||
|
// if the updated swap value is larger than the current memory limit set the swap changes first
|
||||||
|
// then set the memory limit as swap must always be larger than the current limit
|
||||||
|
current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if current < uint64(*resources.Memory.Swap) {
|
||||||
|
settings[0], settings[1] = settings[1], settings[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m.set(path, settings)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryController) Stat(path string, stats *v1.Metrics) error {
|
||||||
|
fMemStat, err := os.Open(filepath.Join(m.Path(path), "memory.stat"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fMemStat.Close()
|
||||||
|
stats.Memory = &v1.MemoryStat{
|
||||||
|
Usage: &v1.MemoryEntry{},
|
||||||
|
Swap: &v1.MemoryEntry{},
|
||||||
|
Kernel: &v1.MemoryEntry{},
|
||||||
|
KernelTCP: &v1.MemoryEntry{},
|
||||||
|
}
|
||||||
|
if err := m.parseStats(fMemStat, stats.Memory); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fMemOomControl, err := os.Open(filepath.Join(m.Path(path), "memory.oom_control"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fMemOomControl.Close()
|
||||||
|
stats.MemoryOomControl = &v1.MemoryOomControl{}
|
||||||
|
if err := m.parseOomControlStats(fMemOomControl, stats.MemoryOomControl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, t := range []struct {
|
||||||
|
module string
|
||||||
|
entry *v1.MemoryEntry
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
module: "",
|
||||||
|
entry: stats.Memory.Usage,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
module: "memsw",
|
||||||
|
entry: stats.Memory.Swap,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
module: "kmem",
|
||||||
|
entry: stats.Memory.Kernel,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
module: "kmem.tcp",
|
||||||
|
entry: stats.Memory.KernelTCP,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
if _, ok := m.ignored[t.module]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, tt := range []struct {
|
||||||
|
name string
|
||||||
|
value *uint64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "usage_in_bytes",
|
||||||
|
value: &t.entry.Usage,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "max_usage_in_bytes",
|
||||||
|
value: &t.entry.Max,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "failcnt",
|
||||||
|
value: &t.entry.Failcnt,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "limit_in_bytes",
|
||||||
|
value: &t.entry.Limit,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
parts := []string{"memory"}
|
||||||
|
if t.module != "" {
|
||||||
|
parts = append(parts, t.module)
|
||||||
|
}
|
||||||
|
parts = append(parts, tt.name)
|
||||||
|
v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, ".")))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*tt.value = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoryController) parseStats(r io.Reader, stat *v1.MemoryStat) error {
|
||||||
|
var (
|
||||||
|
raw = make(map[string]uint64)
|
||||||
|
sc = bufio.NewScanner(r)
|
||||||
|
line int
|
||||||
|
)
|
||||||
|
for sc.Scan() {
|
||||||
|
key, v, err := parseKV(sc.Text())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%d: %v", line, err)
|
||||||
|
}
|
||||||
|
raw[key] = v
|
||||||
|
line++
|
||||||
|
}
|
||||||
|
if err := sc.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
stat.Cache = raw["cache"]
|
||||||
|
stat.RSS = raw["rss"]
|
||||||
|
stat.RSSHuge = raw["rss_huge"]
|
||||||
|
stat.MappedFile = raw["mapped_file"]
|
||||||
|
stat.Dirty = raw["dirty"]
|
||||||
|
stat.Writeback = raw["writeback"]
|
||||||
|
stat.PgPgIn = raw["pgpgin"]
|
||||||
|
stat.PgPgOut = raw["pgpgout"]
|
||||||
|
stat.PgFault = raw["pgfault"]
|
||||||
|
stat.PgMajFault = raw["pgmajfault"]
|
||||||
|
stat.InactiveAnon = raw["inactive_anon"]
|
||||||
|
stat.ActiveAnon = raw["active_anon"]
|
||||||
|
stat.InactiveFile = raw["inactive_file"]
|
||||||
|
stat.ActiveFile = raw["active_file"]
|
||||||
|
stat.Unevictable = raw["unevictable"]
|
||||||
|
stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"]
|
||||||
|
stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"]
|
||||||
|
stat.TotalCache = raw["total_cache"]
|
||||||
|
stat.TotalRSS = raw["total_rss"]
|
||||||
|
stat.TotalRSSHuge = raw["total_rss_huge"]
|
||||||
|
stat.TotalMappedFile = raw["total_mapped_file"]
|
||||||
	stat.TotalDirty = raw["total_dirty"]
	stat.TotalWriteback = raw["total_writeback"]
	stat.TotalPgPgIn = raw["total_pgpgin"]
	stat.TotalPgPgOut = raw["total_pgpgout"]
	stat.TotalPgFault = raw["total_pgfault"]
	stat.TotalPgMajFault = raw["total_pgmajfault"]
	stat.TotalInactiveAnon = raw["total_inactive_anon"]
	stat.TotalActiveAnon = raw["total_active_anon"]
	stat.TotalInactiveFile = raw["total_inactive_file"]
	stat.TotalActiveFile = raw["total_active_file"]
	stat.TotalUnevictable = raw["total_unevictable"]
	return nil
}

func (m *memoryController) parseOomControlStats(r io.Reader, stat *v1.MemoryOomControl) error {
	var (
		raw  = make(map[string]uint64)
		sc   = bufio.NewScanner(r)
		line int
	)
	for sc.Scan() {
		key, v, err := parseKV(sc.Text())
		if err != nil {
			return fmt.Errorf("%d: %v", line, err)
		}
		raw[key] = v
		line++
	}
	if err := sc.Err(); err != nil {
		return err
	}
	stat.OomKillDisable = raw["oom_kill_disable"]
	stat.UnderOom = raw["under_oom"]
	stat.OomKill = raw["oom_kill"]
	return nil
}

func (m *memoryController) set(path string, settings []memorySettings) error {
	for _, t := range settings {
		if t.value != nil {
			if err := retryingWriteFile(
				filepath.Join(m.Path(path), "memory."+t.name),
				[]byte(strconv.FormatInt(*t.value, 10)),
				defaultFilePerm,
			); err != nil {
				return err
			}
		}
	}
	return nil
}

type memorySettings struct {
	name  string
	value *int64
}

func getMemorySettings(resources *specs.LinuxResources) []memorySettings {
	mem := resources.Memory
	var swappiness *int64
	if mem.Swappiness != nil {
		v := int64(*mem.Swappiness)
		swappiness = &v
	}
	return []memorySettings{
		{
			name:  "limit_in_bytes",
			value: mem.Limit,
		},
		{
			name:  "soft_limit_in_bytes",
			value: mem.Reservation,
		},
		{
			name:  "memsw.limit_in_bytes",
			value: mem.Swap,
		},
		{
			name:  "kmem.limit_in_bytes",
			value: mem.Kernel,
		},
		{
			name:  "kmem.tcp.limit_in_bytes",
			value: mem.KernelTCP,
		},
		{
			name:  "oom_control",
			value: getOomControlValue(mem),
		},
		{
			name:  "swappiness",
			value: swappiness,
		},
	}
}

func getOomControlValue(mem *specs.LinuxMemory) *int64 {
	if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller {
		i := int64(1)
		return &i
	}
	return nil
}

func (m *memoryController) memoryEvent(path string, event MemoryEvent) (uintptr, error) {
	root := m.Path(path)
	efd, err := unix.Eventfd(0, unix.EFD_CLOEXEC)
	if err != nil {
		return 0, err
	}
	evtFile, err := os.Open(filepath.Join(root, event.EventFile()))
	if err != nil {
		unix.Close(efd)
		return 0, err
	}
	defer evtFile.Close()
	data := fmt.Sprintf("%d %d %s", efd, evtFile.Fd(), event.Arg())
	evctlPath := filepath.Join(root, "cgroup.event_control")
	if err := retryingWriteFile(evctlPath, []byte(data), 0700); err != nil {
		unix.Close(efd)
		return 0, err
	}
	return uintptr(efd), nil
}
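For context (not part of the vendored code): memoryEvent above uses the cgroup v1 notification API, registering an eventfd through cgroup.event_control and handing the raw descriptor back to the caller. A minimal standalone sketch of consuming such a notification for OOM events follows; the cgroup path and the use of golang.org/x/sys/unix are assumptions, and the counter decode assumes a little-endian host.

// Sketch only: watch a cgroup v1 memory controller for OOM events the same
// way memoryEvent does, then block until the kernel signals the eventfd.
package main

import (
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	root := "/sys/fs/cgroup/memory/mygroup" // assumed cgroup v1 memory path

	// Create the eventfd that the kernel will signal.
	efd, err := unix.Eventfd(0, unix.EFD_CLOEXEC)
	if err != nil {
		panic(err)
	}
	defer unix.Close(efd)

	// Open the control file for the event of interest.
	oom, err := os.Open(filepath.Join(root, "memory.oom_control"))
	if err != nil {
		panic(err)
	}
	defer oom.Close()

	// "<eventfd> <control fd>" registers the notification; oom_control
	// takes no extra argument.
	data := fmt.Sprintf("%d %d", efd, oom.Fd())
	if err := os.WriteFile(filepath.Join(root, "cgroup.event_control"), []byte(data), 0700); err != nil {
		panic(err)
	}

	// Each OOM event increments the eventfd counter; a blocking read
	// returns the 8-byte counter in host byte order.
	buf := make([]byte, 8)
	if _, err := unix.Read(efd, buf); err != nil {
		panic(err)
	}
	fmt.Println("OOM events:", binary.LittleEndian.Uint64(buf))
}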
@ -0,0 +1,39 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import "path/filepath"

func NewNamed(root string, name Name) *namedController {
	return &namedController{
		root: root,
		name: name,
	}
}

type namedController struct {
	root string
	name Name
}

func (n *namedController) Name() Name {
	return n.name
}

func (n *namedController) Path(path string) string {
	return filepath.Join(n.root, string(n.name), path)
}
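Orientation only (not in the diff): the named controller just composes paths under the hierarchy root, so a hypothetical package-internal test could pin down the layout it produces; the root and group names below are made up.

package cgroups

import "testing"

func TestNamedControllerPath(t *testing.T) {
	// Name has a string underlying type, so an untyped constant converts.
	c := NewNamed("/sys/fs/cgroup", "systemd")
	if got, want := c.Path("/mygroup"), "/sys/fs/cgroup/systemd/mygroup"; got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}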
@ -0,0 +1,61 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"os"
	"path/filepath"
	"strconv"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func NewNetCls(root string) *netclsController {
	return &netclsController{
		root: filepath.Join(root, string(NetCLS)),
	}
}

type netclsController struct {
	root string
}

func (n *netclsController) Name() Name {
	return NetCLS
}

func (n *netclsController) Path(path string) string {
	return filepath.Join(n.root, path)
}

func (n *netclsController) Create(path string, resources *specs.LinuxResources) error {
	if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
		return err
	}
	if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 {
		return retryingWriteFile(
			filepath.Join(n.Path(path), "net_cls.classid"),
			[]byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)),
			defaultFilePerm,
		)
	}
	return nil
}

func (n *netclsController) Update(path string, resources *specs.LinuxResources) error {
	return n.Create(path, resources)
}
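Illustration only (not part of the diff): a hypothetical package-internal test showing the on-disk format Create produces, assuming retryingWriteFile writes the bytes verbatim; the temp directory stands in for the real net_cls hierarchy root.

package cgroups

import (
	"os"
	"path/filepath"
	"testing"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func TestNetClsCreateWritesClassID(t *testing.T) {
	root := t.TempDir() // stand-in for the net_cls mount point
	n := NewNetCls(root)

	id := uint32(0x100001)
	err := n.Create("/mygroup", &specs.LinuxResources{
		Network: &specs.LinuxNetwork{ClassID: &id},
	})
	if err != nil {
		t.Fatal(err)
	}

	// The class ID is stored in decimal form in net_cls.classid.
	b, err := os.ReadFile(filepath.Join(n.Path("/mygroup"), "net_cls.classid"))
	if err != nil {
		t.Fatal(err)
	}
	if string(b) != "1048577" { // 0x100001 in decimal
		t.Fatalf("unexpected classid %q", b)
	}
}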
@ -0,0 +1,65 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"fmt"
	"os"
	"path/filepath"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func NewNetPrio(root string) *netprioController {
	return &netprioController{
		root: filepath.Join(root, string(NetPrio)),
	}
}

type netprioController struct {
	root string
}

func (n *netprioController) Name() Name {
	return NetPrio
}

func (n *netprioController) Path(path string) string {
	return filepath.Join(n.root, path)
}

func (n *netprioController) Create(path string, resources *specs.LinuxResources) error {
	if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
		return err
	}
	if resources.Network != nil {
		for _, prio := range resources.Network.Priorities {
			if err := retryingWriteFile(
				filepath.Join(n.Path(path), "net_prio.ifpriomap"),
				formatPrio(prio.Name, prio.Priority),
				defaultFilePerm,
			); err != nil {
				return err
			}
		}
	}
	return nil
}

func formatPrio(name string, prio uint32) []byte {
	return []byte(fmt.Sprintf("%s %d", name, prio))
}
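For reference (not in the diff): each Priorities entry becomes one "interface priority" pair written to net_prio.ifpriomap, which a hypothetical in-package test could assert directly.

package cgroups

import "testing"

func TestFormatPrio(t *testing.T) {
	// One entry per write, e.g. "eth0 5" for interface eth0 at priority 5.
	if got := string(formatPrio("eth0", 5)); got != "eth0 5" {
		t.Fatalf("unexpected ifpriomap entry %q", got)
	}
}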
@ -0,0 +1,61 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"errors"
)

var (
	// ErrIgnoreSubsystem allows the specific subsystem to be skipped
	ErrIgnoreSubsystem = errors.New("skip subsystem")
	// ErrDevicesRequired is returned when the devices subsystem is required but
	// does not exist or is not active
	ErrDevicesRequired = errors.New("devices subsystem is required")
)

// InitOpts allows configuration for the creation or loading of a cgroup
type InitOpts func(*InitConfig) error

// InitConfig provides configuration options for the creation
// or loading of a cgroup and its subsystems
type InitConfig struct {
	// InitCheck can be used to check initialization errors from the subsystem
	InitCheck InitCheck
}

func newInitConfig() *InitConfig {
	return &InitConfig{
		InitCheck: RequireDevices,
	}
}

// InitCheck allows subsystems errors to be checked when initialized or loaded
type InitCheck func(Subsystem, Path, error) error

// AllowAny allows any subsystem errors to be skipped
func AllowAny(_ Subsystem, _ Path, _ error) error {
	return ErrIgnoreSubsystem
}

// RequireDevices requires the device subsystem but no others
func RequireDevices(s Subsystem, _ Path, _ error) error {
	if s.Name() == Devices {
		return ErrDevicesRequired
	}
	return ErrIgnoreSubsystem
}
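Sketch only (not part of the diff): a caller that does not want the devices subsystem to be mandatory can supply its own InitOpts that swaps the default RequireDevices check for AllowAny; the variable name below is made up.

package cgroups

// allowMissingDevices is a sketch of an InitOpts value that replaces the
// default RequireDevices check, so errors from any subsystem are skipped
// rather than failing creation or loading of the cgroup.
var allowMissingDevices InitOpts = func(c *InitConfig) error {
	c.InitCheck = AllowAny
	return nil
}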
@ -0,0 +1,106 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import (
	"errors"
	"fmt"
	"path/filepath"
)

type Path func(subsystem Name) (string, error)

func RootPath(subsystem Name) (string, error) {
	return "/", nil
}

// StaticPath returns a static path to use for all cgroups
func StaticPath(path string) Path {
	return func(_ Name) (string, error) {
		return path, nil
	}
}

// NestedPath will nest the cgroups based on the calling processes cgroup
// placing its child processes inside its own path
func NestedPath(suffix string) Path {
	paths, err := ParseCgroupFile("/proc/self/cgroup")
	if err != nil {
		return errorPath(err)
	}
	return existingPath(paths, suffix)
}

// PidPath will return the correct cgroup paths for an existing process running inside a cgroup
// This is commonly used for the Load function to restore an existing container
func PidPath(pid int) Path {
	p := fmt.Sprintf("/proc/%d/cgroup", pid)
	paths, err := ParseCgroupFile(p)
	if err != nil {
		return errorPath(fmt.Errorf("parse cgroup file %s: %w", p, err))
	}
	return existingPath(paths, "")
}

// ErrControllerNotActive is returned when a controller is not supported or enabled
var ErrControllerNotActive = errors.New("controller is not supported")

func existingPath(paths map[string]string, suffix string) Path {
	// localize the paths based on the root mount dest for nested cgroups
	for n, p := range paths {
		dest, err := getCgroupDestination(n)
		if err != nil {
			return errorPath(err)
		}
		rel, err := filepath.Rel(dest, p)
		if err != nil {
			return errorPath(err)
		}
		if rel == "." {
			rel = dest
		}
		paths[n] = filepath.Join("/", rel)
	}
	return func(name Name) (string, error) {
		root, ok := paths[string(name)]
		if !ok {
			if root, ok = paths["name="+string(name)]; !ok {
				return "", ErrControllerNotActive
			}
		}
		if suffix != "" {
			return filepath.Join(root, suffix), nil
		}
		return root, nil
	}
}

func subPath(path Path, subName string) Path {
	return func(name Name) (string, error) {
		p, err := path(name)
		if err != nil {
			return "", err
		}
		return filepath.Join(p, subName), nil
	}
}

func errorPath(err error) Path {
	return func(_ Name) (string, error) {
		return "", err
	}
}
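For context (not in the diff): a Path is just a resolver from controller name to a location below the mount root, and StaticPath, NestedPath and PidPath all satisfy the same signature, so callers can swap resolution strategies without touching the rest of the API. A hypothetical package-internal example of resolving the current process's devices cgroup with PidPath, which only works on Linux with /proc mounted:

package cgroups

import (
	"fmt"
	"os"
)

// ExamplePidPath is a sketch: resolve where the devices controller of the
// current process lives, relative to the cgroup v1 mount root.
func ExamplePidPath() {
	path := PidPath(os.Getpid())
	p, err := path(Devices)
	if err != nil {
		// ErrControllerNotActive when the controller is not mounted/enabled.
		fmt.Println("devices controller not active:", err)
		return
	}
	fmt.Println("devices cgroup:", p) // e.g. "/user.slice" on a systemd host
}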
@ -0,0 +1,37 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cgroups

import "path/filepath"

func NewPerfEvent(root string) *PerfEventController {
	return &PerfEventController{
		root: filepath.Join(root, string(PerfEvent)),
	}
}

type PerfEventController struct {
	root string
}

func (p *PerfEventController) Name() Name {
	return PerfEvent
}

func (p *PerfEventController) Path(path string) string {
	return filepath.Join(p.root, path)
}