From aec131a8af479209471ae72f7c76ebcad5ef4182 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 15 Jan 2025 13:28:48 +0100 Subject: [PATCH 1/5] vendor: golang.org/x/sys v0.29.0 full diff: https://github.com/golang/sys/compare/v0.28.0...v0.29.0 Signed-off-by: Sebastiaan van Stijn --- go.mod | 2 +- go.sum | 4 ++-- vendor/golang.org/x/sys/unix/syscall_dragonfly.go | 12 ++++++++++++ vendor/golang.org/x/sys/windows/dll_windows.go | 11 +++++------ vendor/modules.txt | 2 +- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index fddc49b23ef6..0a6256aad19d 100644 --- a/go.mod +++ b/go.mod @@ -100,7 +100,7 @@ require ( golang.org/x/mod v0.21.0 golang.org/x/net v0.33.0 golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 golang.org/x/time v0.6.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 google.golang.org/grpc v1.69.4 diff --git a/go.sum b/go.sum index 5db0290dfeac..81a00adf5715 100644 --- a/go.sum +++ b/go.sum @@ -503,8 +503,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2c90..be8c0020701e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf6335c..3ca814f54d44 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. 
+// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 023b05794e90..ab04210c5eaa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -974,7 +974,7 @@ golang.org/x/net/trace golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.28.0 +# golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix From f1e79ad4262089b33b5404b8ef3a4913c02242df Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 15 Jan 2025 13:27:08 +0100 Subject: [PATCH 2/5] vendor: github.com/containerd/go-cni v1.1.12 full diff: https://github.com/containerd/go-cni/compare/v1.1.11...v1.1.12 Signed-off-by: Sebastiaan van Stijn --- go.mod | 4 +- go.sum | 8 +- vendor/github.com/containerd/go-cni/cni.go | 39 +- .../containerd/go-cni/deprecated.go | 2 + vendor/github.com/containerd/go-cni/mutex.go | 23 ++ .../containerd/go-cni/mutex_deadlocks.go | 25 ++ vendor/github.com/containerd/go-cni/result.go | 2 - .../github.com/containerd/go-cni/testutils.go | 29 +- vendor/github.com/petermattis/goid/.gitignore | 4 + vendor/github.com/petermattis/goid/LICENSE | 202 +++++++++ vendor/github.com/petermattis/goid/README.md | 4 + vendor/github.com/petermattis/goid/goid.go | 35 ++ .../github.com/petermattis/goid/goid_gccgo.go | 26 ++ .../github.com/petermattis/goid/goid_go1.3.c | 23 ++ .../github.com/petermattis/goid/goid_go1.3.go | 22 + .../github.com/petermattis/goid/goid_go1.4.go | 35 ++ .../github.com/petermattis/goid/goid_go1.4.s | 18 + .../github.com/petermattis/goid/goid_go1.5.go | 28 ++ .../github.com/petermattis/goid/goid_go1.5.s | 44 ++ .../github.com/petermattis/goid/goid_slow.go | 24 ++ .../petermattis/goid/runtime_gccgo_go1.8.go | 17 + .../petermattis/goid/runtime_go1.23.go | 38 ++ .../petermattis/goid/runtime_go1.5.go | 57 +++ .../petermattis/goid/runtime_go1.6.go | 43 ++ .../petermattis/goid/runtime_go1.9.go | 37 ++ .../sasha-s/go-deadlock/.travis.yml | 26 ++ vendor/github.com/sasha-s/go-deadlock/LICENSE | 201 +++++++++ .../github.com/sasha-s/go-deadlock/Readme.md | 187 +++++++++ .../sasha-s/go-deadlock/deadlock.go | 387 ++++++++++++++++++ .../sasha-s/go-deadlock/deadlock_map.go | 10 + .../sasha-s/go-deadlock/stacktraces.go | 107 +++++ vendor/github.com/sasha-s/go-deadlock/test.sh | 12 + .../github.com/sasha-s/go-deadlock/trylock.go | 39 ++ vendor/modules.txt | 8 +- 34 files changed, 1716 insertions(+), 50 deletions(-) create mode 100644 vendor/github.com/containerd/go-cni/mutex.go create mode 100644 vendor/github.com/containerd/go-cni/mutex_deadlocks.go create mode 100644 vendor/github.com/petermattis/goid/.gitignore create mode 100644 vendor/github.com/petermattis/goid/LICENSE create mode 100644 vendor/github.com/petermattis/goid/README.md create mode 100644 vendor/github.com/petermattis/goid/goid.go create mode 100644 vendor/github.com/petermattis/goid/goid_gccgo.go create mode 100644 vendor/github.com/petermattis/goid/goid_go1.3.c create mode 100644 vendor/github.com/petermattis/goid/goid_go1.3.go create mode 100644 vendor/github.com/petermattis/goid/goid_go1.4.go create mode 100644 
vendor/github.com/petermattis/goid/goid_go1.4.s create mode 100644 vendor/github.com/petermattis/goid/goid_go1.5.go create mode 100644 vendor/github.com/petermattis/goid/goid_go1.5.s create mode 100644 vendor/github.com/petermattis/goid/goid_slow.go create mode 100644 vendor/github.com/petermattis/goid/runtime_gccgo_go1.8.go create mode 100644 vendor/github.com/petermattis/goid/runtime_go1.23.go create mode 100644 vendor/github.com/petermattis/goid/runtime_go1.5.go create mode 100644 vendor/github.com/petermattis/goid/runtime_go1.6.go create mode 100644 vendor/github.com/petermattis/goid/runtime_go1.9.go create mode 100644 vendor/github.com/sasha-s/go-deadlock/.travis.yml create mode 100644 vendor/github.com/sasha-s/go-deadlock/LICENSE create mode 100644 vendor/github.com/sasha-s/go-deadlock/Readme.md create mode 100644 vendor/github.com/sasha-s/go-deadlock/deadlock.go create mode 100644 vendor/github.com/sasha-s/go-deadlock/deadlock_map.go create mode 100644 vendor/github.com/sasha-s/go-deadlock/stacktraces.go create mode 100644 vendor/github.com/sasha-s/go-deadlock/test.sh create mode 100644 vendor/github.com/sasha-s/go-deadlock/trylock.go diff --git a/go.mod b/go.mod index 0a6256aad19d..a05a77623fdc 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/containerd/continuity v0.4.5 github.com/containerd/errdefs v1.0.0 github.com/containerd/fuse-overlayfs-snapshotter/v2 v2.1.1 - github.com/containerd/go-cni v1.1.11 + github.com/containerd/go-cni v1.1.12 github.com/containerd/go-runc v1.1.0 github.com/containerd/log v0.1.0 github.com/containerd/nydus-snapshotter v0.15.0 @@ -164,11 +164,13 @@ require ( github.com/moby/term v0.5.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect + github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect diff --git a/go.sum b/go.sum index 81a00adf5715..307c0a547daf 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/fuse-overlayfs-snapshotter/v2 v2.1.1 h1:3N/qo9fkWavYRLeUWsKWOulrLYLMLKcut6EQJoJ2q/A= github.com/containerd/fuse-overlayfs-snapshotter/v2 v2.1.1/go.mod h1:WFiRbg7aIgJozIvNe3r9aHszi6AQQigOPgnbkk9xMmo= -github.com/containerd/go-cni v1.1.11 h1:fWt1K15AmSLsEfa57N+qYw4NeGPiQKYq1pjNGJwV9mc= -github.com/containerd/go-cni v1.1.11/go.mod h1:/Y/sL8yqYQn1ZG1om1OncJB1W4zN3YmjfP/ShCzG/OY= +github.com/containerd/go-cni v1.1.12 h1:wm/5VD/i255hjM4uIZjBRiEQ7y98W9ACy/mHeLi4+94= +github.com/containerd/go-cni v1.1.12/go.mod h1:+jaqRBdtW5faJxj2Qwg1Of7GsV66xcvnCx4mSJtUlxU= github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA= github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U= github.com/containerd/log v0.1.0 
h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -316,6 +316,8 @@ github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiL github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -350,6 +352,8 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b h1:h+3JX2VoWTFuyQEo87pStk/a99dzIO1mM9KxIyLPGTU= diff --git a/vendor/github.com/containerd/go-cni/cni.go b/vendor/github.com/containerd/go-cni/cni.go index 003f3029319c..70ca3821e846 100644 --- a/vendor/github.com/containerd/go-cni/cni.go +++ b/vendor/github.com/containerd/go-cni/cni.go @@ -80,7 +80,10 @@ type libcni struct { cniConfig cnilibrary.CNI networkCount int // minimum network plugin configurations needed to initialize cni networks []*Network - sync.RWMutex + // Mutex contract: + // - lock in public methods: write lock when mutating the state, read lock when reading the state. + // - never lock in private methods. + RWMutex } func defaultCNIConfig() *libcni { @@ -135,11 +138,11 @@ func (c *libcni) Load(opts ...Opt) error { // Status returns the status of CNI initialization. func (c *libcni) Status() error { + c.RLock() + defer c.RUnlock() if err := c.ready(); err != nil { return err } - c.RLock() - defer c.RUnlock() // STATUS is only called for CNI Version 1.1.0 or greater. It is ignored for previous versions. for _, v := range c.networks { err := c.cniConfig.GetStatusNetworkList(context.Background(), v.config) @@ -162,11 +165,11 @@ func (c *libcni) Networks() []*Network { // Setup setups the network in the namespace and returns a Result func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) { + c.RLock() + defer c.RUnlock() if err := c.ready(); err != nil { return nil, err } - c.RLock() - defer c.RUnlock() ns, err := newNamespace(id, path, opts...) 
if err != nil { return nil, err @@ -180,11 +183,11 @@ func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...Name // SetupSerially setups the network in the namespace and returns a Result func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) { + c.RLock() + defer c.RUnlock() if err := c.ready(); err != nil { return nil, err } - c.RLock() - defer c.RUnlock() ns, err := newNamespace(id, path, opts...) if err != nil { return nil, err @@ -198,7 +201,7 @@ func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts func (c *libcni) attachNetworksSerially(ctx context.Context, ns *Namespace) ([]*types100.Result, error) { var results []*types100.Result - for _, network := range c.Networks() { + for _, network := range c.networks { r, err := network.Attach(ctx, ns) if err != nil { return nil, err @@ -223,15 +226,15 @@ func asynchAttach(ctx context.Context, index int, n *Network, ns *Namespace, wg func (c *libcni) attachNetworks(ctx context.Context, ns *Namespace) ([]*types100.Result, error) { var wg sync.WaitGroup var firstError error - results := make([]*types100.Result, len(c.Networks())) + results := make([]*types100.Result, len(c.networks)) rc := make(chan asynchAttachResult) - for i, network := range c.Networks() { + for i, network := range c.networks { wg.Add(1) go asynchAttach(ctx, i, network, ns, &wg, rc) } - for range c.Networks() { + for range c.networks { rs := <-rc if rs.err != nil && firstError == nil { firstError = rs.err @@ -245,16 +248,16 @@ func (c *libcni) attachNetworks(ctx context.Context, ns *Namespace) ([]*types100 // Remove removes the network config from the namespace func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error { + c.RLock() + defer c.RUnlock() if err := c.ready(); err != nil { return err } - c.RLock() - defer c.RUnlock() ns, err := newNamespace(id, path, opts...) if err != nil { return err } - for _, network := range c.Networks() { + for _, network := range c.networks { if err := network.Remove(ctx, ns); err != nil { // Based on CNI spec v0.7.0, empty network namespace is allowed to // do best effort cleanup. However, it is not handled consistently @@ -275,16 +278,16 @@ func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...Nam // Check checks if the network is still in desired state func (c *libcni) Check(ctx context.Context, id string, path string, opts ...NamespaceOpts) error { + c.RLock() + defer c.RUnlock() if err := c.ready(); err != nil { return err } - c.RLock() - defer c.RUnlock() ns, err := newNamespace(id, path, opts...) if err != nil { return err } - for _, network := range c.Networks() { + for _, network := range c.networks { err := network.Check(ctx, ns) if err != nil { return err @@ -329,8 +332,6 @@ func (c *libcni) reset() { } func (c *libcni) ready() error { - c.RLock() - defer c.RUnlock() if len(c.networks) < c.networkCount { return ErrCNINotInitialized } diff --git a/vendor/github.com/containerd/go-cni/deprecated.go b/vendor/github.com/containerd/go-cni/deprecated.go index 06afd15432de..fd8b3eb33005 100644 --- a/vendor/github.com/containerd/go-cni/deprecated.go +++ b/vendor/github.com/containerd/go-cni/deprecated.go @@ -30,5 +30,7 @@ type CNIResult = Result //revive:disable // type name will be used as cni.CNIRes // results fails, or if a network could not be found. 
// Deprecated: do not use func (c *libcni) GetCNIResultFromResults(results []*types100.Result) (*Result, error) { + c.RLock() + defer c.RUnlock() return c.createResult(results) } diff --git a/vendor/github.com/containerd/go-cni/mutex.go b/vendor/github.com/containerd/go-cni/mutex.go new file mode 100644 index 000000000000..8d40d62d0bbb --- /dev/null +++ b/vendor/github.com/containerd/go-cni/mutex.go @@ -0,0 +1,23 @@ +//go:build !deadlocks && !race + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import "sync" + +type RWMutex = sync.RWMutex diff --git a/vendor/github.com/containerd/go-cni/mutex_deadlocks.go b/vendor/github.com/containerd/go-cni/mutex_deadlocks.go new file mode 100644 index 000000000000..20e97b2b174c --- /dev/null +++ b/vendor/github.com/containerd/go-cni/mutex_deadlocks.go @@ -0,0 +1,25 @@ +//go:build deadlocks || race + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import ( + "github.com/sasha-s/go-deadlock" +) + +type RWMutex = deadlock.RWMutex diff --git a/vendor/github.com/containerd/go-cni/result.go b/vendor/github.com/containerd/go-cni/result.go index 0dd002e8e068..ea160dff3df3 100644 --- a/vendor/github.com/containerd/go-cni/result.go +++ b/vendor/github.com/containerd/go-cni/result.go @@ -69,8 +69,6 @@ type Config struct { // interfaces created in the namespace. It returns an error if validation of // results fails, or if a network could not be found. 
func (c *libcni) createResult(results []*types100.Result) (*Result, error) { - c.RLock() - defer c.RUnlock() r := &Result{ Interfaces: make(map[string]*Config), raw: results, diff --git a/vendor/github.com/containerd/go-cni/testutils.go b/vendor/github.com/containerd/go-cni/testutils.go index c27010039cad..d195feaf75c4 100644 --- a/vendor/github.com/containerd/go-cni/testutils.go +++ b/vendor/github.com/containerd/go-cni/testutils.go @@ -23,22 +23,11 @@ import ( "testing" ) -func makeTmpDir(prefix string) (string, error) { - tmpDir, err := os.MkdirTemp("", prefix) - if err != nil { - return "", err - } - return tmpDir, nil -} - func makeFakeCNIConfig(t *testing.T) (string, string) { - cniDir, err := makeTmpDir("fakecni") - if err != nil { - t.Fatalf("Failed to create plugin config dir: %v", err) - } + cniDir := t.TempDir() cniConfDir := path.Join(cniDir, "net.d") - err = os.MkdirAll(cniConfDir, 0777) + err := os.MkdirAll(cniConfDir, 0777) if err != nil { t.Fatalf("Failed to create network config dir: %v", err) } @@ -69,13 +58,6 @@ func makeFakeCNIConfig(t *testing.T) (string, string) { return cniDir, cniConfDir } -func tearDownCNIConfig(t *testing.T, confDir string) { - err := os.RemoveAll(confDir) - if err != nil { - t.Fatalf("Failed to cleanup CNI configs: %v", err) - } -} - func buildFakeConfig(t *testing.T) (string, string) { conf := ` { @@ -111,13 +93,10 @@ func buildFakeConfig(t *testing.T) (string, string) { ] }` - cniDir, err := makeTmpDir("fakecni") - if err != nil { - t.Fatalf("Failed to create plugin config dir: %v", err) - } + cniDir := t.TempDir() cniConfDir := path.Join(cniDir, "net.d") - err = os.MkdirAll(cniConfDir, 0777) + err := os.MkdirAll(cniConfDir, 0777) if err != nil { t.Fatalf("Failed to create network config dir: %v", err) } diff --git a/vendor/github.com/petermattis/goid/.gitignore b/vendor/github.com/petermattis/goid/.gitignore new file mode 100644 index 000000000000..2b9d6b552841 --- /dev/null +++ b/vendor/github.com/petermattis/goid/.gitignore @@ -0,0 +1,4 @@ +*~ +*.test +.*.swp +.DS_Store diff --git a/vendor/github.com/petermattis/goid/LICENSE b/vendor/github.com/petermattis/goid/LICENSE new file mode 100644 index 000000000000..e06d2081865a --- /dev/null +++ b/vendor/github.com/petermattis/goid/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/petermattis/goid/README.md b/vendor/github.com/petermattis/goid/README.md new file mode 100644 index 000000000000..3fd144c2c1b1 --- /dev/null +++ b/vendor/github.com/petermattis/goid/README.md @@ -0,0 +1,4 @@ +# goid ![Build Status](https://github.com/petermattis/goid/actions/workflows/go.yml/badge.svg) + +Programatically retrieve the current goroutine's ID. See [the CI +configuration](.github/workflows/go.yml) for supported Go versions. diff --git a/vendor/github.com/petermattis/goid/goid.go b/vendor/github.com/petermattis/goid/goid.go new file mode 100644 index 000000000000..408e619929a8 --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid.go @@ -0,0 +1,35 @@ +// Copyright 2016 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +package goid + +import ( + "bytes" + "runtime" + "strconv" +) + +func ExtractGID(s []byte) int64 { + s = s[len("goroutine "):] + s = s[:bytes.IndexByte(s, ' ')] + gid, _ := strconv.ParseInt(string(s), 10, 64) + return gid +} + +// Parse the goid from runtime.Stack() output. Slow, but it works. +func getSlow() int64 { + var buf [64]byte + return ExtractGID(buf[:runtime.Stack(buf[:], false)]) +} diff --git a/vendor/github.com/petermattis/goid/goid_gccgo.go b/vendor/github.com/petermattis/goid/goid_gccgo.go new file mode 100644 index 000000000000..31c14d99a921 --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2018 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +//go:build gccgo +// +build gccgo + +package goid + +//extern runtime.getg +func getg() *g + +func Get() int64 { + return getg().goid +} diff --git a/vendor/github.com/petermattis/goid/goid_go1.3.c b/vendor/github.com/petermattis/goid/goid_go1.3.c new file mode 100644 index 000000000000..2e3f7ab79d26 --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_go1.3.c @@ -0,0 +1,23 @@ +// Copyright 2015 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +// +build !go1.4 + +#include + +void ·Get(int64 ret) { + ret = g->goid; + USED(&ret); +} diff --git a/vendor/github.com/petermattis/goid/goid_go1.3.go b/vendor/github.com/petermattis/goid/goid_go1.3.go new file mode 100644 index 000000000000..d73b6992018f --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_go1.3.go @@ -0,0 +1,22 @@ +// Copyright 2015 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +//go:build !go1.4 +// +build !go1.4 + +package goid + +// Get returns the id of the current goroutine. +func Get() int64 diff --git a/vendor/github.com/petermattis/goid/goid_go1.4.go b/vendor/github.com/petermattis/goid/goid_go1.4.go new file mode 100644 index 000000000000..4798980b3f9b --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_go1.4.go @@ -0,0 +1,35 @@ +// Copyright 2015 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +//go:build go1.4 && !go1.5 +// +build go1.4,!go1.5 + +package goid + +import "unsafe" + +var pointerSize = unsafe.Sizeof(uintptr(0)) + +// Backdoor access to runtime·getg(). +func getg() uintptr // in goid_go1.4.s + +// Get returns the id of the current goroutine. +func Get() int64 { + // The goid is the 16th field in the G struct where each field is a + // pointer, uintptr or padded to that size. See runtime.h from the + // Go sources. I'm not aware of a cleaner way to determine the + // offset. + return *(*int64)(unsafe.Pointer(getg() + 16*pointerSize)) +} diff --git a/vendor/github.com/petermattis/goid/goid_go1.4.s b/vendor/github.com/petermattis/goid/goid_go1.4.s new file mode 100644 index 000000000000..21a07d66243e --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_go1.4.s @@ -0,0 +1,18 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Assembly to get into package runtime without using exported symbols. +// See https://github.com/golang/go/blob/release-branch.go1.4/misc/cgo/test/backdoor/thunk.s + +// +build amd64 amd64p32 arm 386 +// +build go1.4,!go1.5 + +#include "textflag.h" + +#ifdef GOARCH_arm +#define JMP B +#endif + +TEXT ·getg(SB),NOSPLIT,$0-0 + JMP runtime·getg(SB) diff --git a/vendor/github.com/petermattis/goid/goid_go1.5.go b/vendor/github.com/petermattis/goid/goid_go1.5.go new file mode 100644 index 000000000000..4521f792016c --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_go1.5.go @@ -0,0 +1,28 @@ +// Copyright 2016 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +//go:build (386 || amd64 || amd64p32 || arm || arm64 || s390x) && gc && go1.5 +// +build 386 amd64 amd64p32 arm arm64 s390x +// +build gc +// +build go1.5 + +package goid + +// Defined in goid_go1.5.s. +func getg() *g + +func Get() int64 { + return getg().goid +} diff --git a/vendor/github.com/petermattis/goid/goid_go1.5.s b/vendor/github.com/petermattis/goid/goid_go1.5.s new file mode 100644 index 000000000000..c49333f14a68 --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_go1.5.s @@ -0,0 +1,44 @@ +// Copyright 2021 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +// Assembly to mimic runtime.getg. + +//go:build (386 || amd64 || amd64p32 || arm || arm64 || s390x) && gc && go1.5 +// +build 386 amd64 amd64p32 arm arm64 s390x +// +build gc +// +build go1.5 + +#include "textflag.h" + +// func getg() *g +TEXT ·getg(SB),NOSPLIT,$0-8 +#ifdef GOARCH_386 + MOVL (TLS), AX + MOVL AX, ret+0(FP) +#endif +#ifdef GOARCH_amd64 + MOVQ (TLS), AX + MOVQ AX, ret+0(FP) +#endif +#ifdef GOARCH_arm + MOVW g, ret+0(FP) +#endif +#ifdef GOARCH_arm64 + MOVD g, ret+0(FP) +#endif +#ifdef GOARCH_s390x + MOVD g, ret+0(FP) +#endif + RET diff --git a/vendor/github.com/petermattis/goid/goid_slow.go b/vendor/github.com/petermattis/goid/goid_slow.go new file mode 100644 index 000000000000..8bdb4357e056 --- /dev/null +++ b/vendor/github.com/petermattis/goid/goid_slow.go @@ -0,0 +1,24 @@ +// Copyright 2016 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +//go:build (go1.4 && !go1.5 && !amd64 && !amd64p32 && !arm && !386) || (go1.5 && !386 && !amd64 && !amd64p32 && !arm && !arm64 && !s390x) +// +build go1.4,!go1.5,!amd64,!amd64p32,!arm,!386 go1.5,!386,!amd64,!amd64p32,!arm,!arm64,!s390x + +package goid + +// Get returns the id of the current goroutine. 
+func Get() int64 { + return getSlow() +} diff --git a/vendor/github.com/petermattis/goid/runtime_gccgo_go1.8.go b/vendor/github.com/petermattis/goid/runtime_gccgo_go1.8.go new file mode 100644 index 000000000000..dfcb74e0c464 --- /dev/null +++ b/vendor/github.com/petermattis/goid/runtime_gccgo_go1.8.go @@ -0,0 +1,17 @@ +//go:build gccgo && go1.8 +// +build gccgo,go1.8 + +package goid + +// https://github.com/gcc-mirror/gcc/blob/releases/gcc-7/libgo/go/runtime/runtime2.go#L329-L354 + +type g struct { + _panic uintptr + _defer uintptr + m uintptr + syscallsp uintptr + syscallpc uintptr + param uintptr + atomicstatus uint32 + goid int64 // Here it is! +} diff --git a/vendor/github.com/petermattis/goid/runtime_go1.23.go b/vendor/github.com/petermattis/goid/runtime_go1.23.go new file mode 100644 index 000000000000..146d81734a02 --- /dev/null +++ b/vendor/github.com/petermattis/goid/runtime_go1.23.go @@ -0,0 +1,38 @@ +//go:build gc && go1.23 +// +build gc,go1.23 + +package goid + +type stack struct { + lo uintptr + hi uintptr +} + +type gobuf struct { + sp uintptr + pc uintptr + g uintptr + ctxt uintptr + ret uintptr + lr uintptr + bp uintptr +} + +type g struct { + stack stack + stackguard0 uintptr + stackguard1 uintptr + + _panic uintptr + _defer uintptr + m uintptr + sched gobuf + syscallsp uintptr + syscallpc uintptr + syscallbp uintptr + stktopsp uintptr + param uintptr + atomicstatus uint32 + stackLock uint32 + goid int64 // Here it is! +} diff --git a/vendor/github.com/petermattis/goid/runtime_go1.5.go b/vendor/github.com/petermattis/goid/runtime_go1.5.go new file mode 100644 index 000000000000..6ce2ab8eee10 --- /dev/null +++ b/vendor/github.com/petermattis/goid/runtime_go1.5.go @@ -0,0 +1,57 @@ +// Copyright 2016 Peter Mattis. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. + +//go:build go1.5 && !go1.6 +// +build go1.5,!go1.6 + +package goid + +// Just enough of the structs from runtime/runtime2.go to get the offset to goid. +// See https://github.com/golang/go/blob/release-branch.go1.5/src/runtime/runtime2.go + +type stack struct { + lo uintptr + hi uintptr +} + +type gobuf struct { + sp uintptr + pc uintptr + g uintptr + ctxt uintptr + ret uintptr + lr uintptr + bp uintptr +} + +type g struct { + stack stack + stackguard0 uintptr + stackguard1 uintptr + + _panic uintptr + _defer uintptr + m uintptr + stackAlloc uintptr + sched gobuf + syscallsp uintptr + syscallpc uintptr + stkbar []uintptr + stkbarPos uintptr + param uintptr + atomicstatus uint32 + stackLock uint32 + goid int64 // Here it is! +} diff --git a/vendor/github.com/petermattis/goid/runtime_go1.6.go b/vendor/github.com/petermattis/goid/runtime_go1.6.go new file mode 100644 index 000000000000..983d55bc477d --- /dev/null +++ b/vendor/github.com/petermattis/goid/runtime_go1.6.go @@ -0,0 +1,43 @@ +//go:build gc && go1.6 && !go1.9 +// +build gc,go1.6,!go1.9 + +package goid + +// Just enough of the structs from runtime/runtime2.go to get the offset to goid. 
+// See https://github.com/golang/go/blob/release-branch.go1.6/src/runtime/runtime2.go + +type stack struct { + lo uintptr + hi uintptr +} + +type gobuf struct { + sp uintptr + pc uintptr + g uintptr + ctxt uintptr + ret uintptr + lr uintptr + bp uintptr +} + +type g struct { + stack stack + stackguard0 uintptr + stackguard1 uintptr + + _panic uintptr + _defer uintptr + m uintptr + stackAlloc uintptr + sched gobuf + syscallsp uintptr + syscallpc uintptr + stkbar []uintptr + stkbarPos uintptr + stktopsp uintptr + param uintptr + atomicstatus uint32 + stackLock uint32 + goid int64 // Here it is! +} diff --git a/vendor/github.com/petermattis/goid/runtime_go1.9.go b/vendor/github.com/petermattis/goid/runtime_go1.9.go new file mode 100644 index 000000000000..f9ef8f5ffe6d --- /dev/null +++ b/vendor/github.com/petermattis/goid/runtime_go1.9.go @@ -0,0 +1,37 @@ +//go:build gc && go1.9 && !go1.23 +// +build gc,go1.9,!go1.23 + +package goid + +type stack struct { + lo uintptr + hi uintptr +} + +type gobuf struct { + sp uintptr + pc uintptr + g uintptr + ctxt uintptr + ret uintptr + lr uintptr + bp uintptr +} + +type g struct { + stack stack + stackguard0 uintptr + stackguard1 uintptr + + _panic uintptr + _defer uintptr + m uintptr + sched gobuf + syscallsp uintptr + syscallpc uintptr + stktopsp uintptr + param uintptr + atomicstatus uint32 + stackLock uint32 + goid int64 // Here it is! +} diff --git a/vendor/github.com/sasha-s/go-deadlock/.travis.yml b/vendor/github.com/sasha-s/go-deadlock/.travis.yml new file mode 100644 index 000000000000..5180de6adebb --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/.travis.yml @@ -0,0 +1,26 @@ +arch: + - amd64 + - ppc64le +language: go +sudo: false +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + - 1.16.x + - master + - tip + +before_install: + - go get golang.org/x/tools/cmd/cover + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/sasha-s/go-deadlock/LICENSE b/vendor/github.com/sasha-s/go-deadlock/LICENSE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sasha-s/go-deadlock/Readme.md b/vendor/github.com/sasha-s/go-deadlock/Readme.md new file mode 100644 index 000000000000..792d8a205a43 --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/Readme.md @@ -0,0 +1,187 @@ +# Online deadlock detection in go (golang). 
[![Try it online](https://img.shields.io/badge/try%20it-online-blue.svg)](https://wandbox.org/permlink/hJc6QCZowxbNm9WW) [![Docs](https://godoc.org/github.com/sasha-s/go-deadlock?status.svg)](https://godoc.org/github.com/sasha-s/go-deadlock) [![Build Status](https://travis-ci.com/sasha-s/go-deadlock.svg?branch=master)](https://travis-ci.com/sasha-s/go-deadlock) [![codecov](https://codecov.io/gh/sasha-s/go-deadlock/branch/master/graph/badge.svg)](https://codecov.io/gh/sasha-s/go-deadlock) [![version](https://badge.fury.io/gh/sasha-s%2Fgo-deadlock.svg)](https://github.com/sasha-s/go-deadlock/releases) [![Go Report Card](https://goreportcard.com/badge/github.com/sasha-s/go-deadlock)](https://goreportcard.com/report/github.com/sasha-s/go-deadlock) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+## Why
+Deadlocks happen and are painful to debug.
+
+## What
+go-deadlock provides (RW)Mutex drop-in replacements for sync.(RW)Mutex.
+It would not work if you create a spaghetti of channels.
+Mutexes only.
+
+## Installation
+```sh
+go get github.com/sasha-s/go-deadlock/...
+```
+
+## Usage
+```go
+import "github.com/sasha-s/go-deadlock"
+var mu deadlock.Mutex
+// Use normally, it works exactly like sync.Mutex does.
+mu.Lock()
+
+defer mu.Unlock()
+// Or
+var rw deadlock.RWMutex
+rw.RLock()
+defer rw.RUnlock()
+```
+
+### Deadlocks
+One of the most common sources of deadlocks is inconsistent lock ordering:
+say, you have two mutexes A and B, and in some goroutines you have
+```go
+A.Lock() // defer A.Unlock() or similar.
+...
+B.Lock() // defer B.Unlock() or similar.
+```
+And in another goroutine the order of locks is reversed:
+```go
+B.Lock() // defer B.Unlock() or similar.
+...
+A.Lock() // defer A.Unlock() or similar.
+```
+
+Another common source of deadlocks is taking the same lock twice in the same goroutine:
+```
+A.RLock() or A.Lock()
+
+A.Lock() or A.RLock()
+```
+
+This does not guarantee a deadlock (maybe the goroutines above can never be running at the same time), but it is usually a design flaw at the very least.
+
+go-deadlock can detect such cases (unless you cross a goroutine boundary - say, lock A, then spawn a goroutine, wait until it signals, and lock B inside that goroutine), even if the deadlock itself happens very infrequently and is painful to reproduce!
+
+Each time go-deadlock sees a lock attempt for lock B, it records the order A before B, for each lock that is currently being held in the same goroutine, and it prints a report (and exits the program by default) when it sees the locking order being violated.
+
+In addition, if it sees that we have been waiting on a lock for a long time (Opts.DeadlockTimeout, 30 seconds by default), it reports a potential deadlock, also printing the stacktrace for the goroutine that is currently holding the lock we are desperately trying to grab.
+
+
+## Sample output
+#### Inconsistent lock ordering:
+```
+POTENTIAL DEADLOCK: Inconsistent locking.
saw this ordering in one goroutine: +happened before +inmem.go:623 bttest.(*server).ReadModifyWriteRow { r.mu.Lock() } <<<<< +inmem_test.go:118 bttest.TestConcurrentMutationsReadModifyAndGC.func4 { _, _ = s.ReadModifyWriteRow(ctx, rmw()) } + +happened after +inmem.go:629 bttest.(*server).ReadModifyWriteRow { tbl.mu.RLock() } <<<<< +inmem_test.go:118 bttest.TestConcurrentMutationsReadModifyAndGC.func4 { _, _ = s.ReadModifyWriteRow(ctx, rmw()) } + +in another goroutine: happened before +inmem.go:799 bttest.(*table).gc { t.mu.RLock() } <<<<< +inmem_test.go:125 bttest.TestConcurrentMutationsReadModifyAndGC.func5 { tbl.gc() } + +happend after +inmem.go:814 bttest.(*table).gc { r.mu.Lock() } <<<<< +inmem_test.go:125 bttest.TestConcurrentMutationsReadModifyAndGC.func5 { tbl.gc() } +``` + +#### Waiting for a lock for a long time: + +``` +POTENTIAL DEADLOCK: +Previous place where the lock was grabbed +goroutine 240 lock 0xc820160440 +inmem.go:799 bttest.(*table).gc { t.mu.RLock() } <<<<< +inmem_test.go:125 bttest.TestConcurrentMutationsReadModifyAndGC.func5 { tbl.gc() } + +Have been trying to lock it again for more than 40ms +goroutine 68 lock 0xc820160440 +inmem.go:785 bttest.(*table).mutableRow { t.mu.Lock() } <<<<< +inmem.go:428 bttest.(*server).MutateRow { r := tbl.mutableRow(string(req.RowKey)) } +inmem_test.go:111 bttest.TestConcurrentMutationsReadModifyAndGC.func3 { s.MutateRow(ctx, req) } + + +Here is what goroutine 240 doing now +goroutine 240 [select]: +github.com/sasha-s/go-deadlock.lock(0xc82028ca10, 0x5189e0, 0xc82013a9b0) + /Users/sasha/go/src/github.com/sasha-s/go-deadlock/deadlock.go:163 +0x1640 +github.com/sasha-s/go-deadlock.(*Mutex).Lock(0xc82013a9b0) + /Users/sasha/go/src/github.com/sasha-s/go-deadlock/deadlock.go:54 +0x86 +google.golang.org/cloud/bigtable/bttest.(*table).gc(0xc820160440) + /Users/sasha/go/src/google.golang.org/cloud/bigtable/bttest/inmem.go:814 +0x28d +google.golang.org/cloud/bigtable/bttest.TestConcurrentMutationsReadModifyAndGC.func5(0xc82015c760, 0xc820160440) /Users/sasha/go/src/google.golang.org/cloud/bigtable/bttest/inmem_test.go:125 +0x48 +created by google.golang.org/cloud/bigtable/bttest.TestConcurrentMutationsReadModifyAndGC + /Users/sasha/go/src/google.golang.org/cloud/bigtable/bttest/inmem_test.go:126 +0xb6f +``` + +## Used in +[cockroachdb: Potential deadlock between Gossip.SetStorage and Node.gossipStores](https://github.com/cockroachdb/cockroach/issues/7972) + +[bigtable/bttest: A race between GC and row mutations](https://code-review.googlesource.com#/c/5301/) + +## Need a mutex that works with net.context? +I have [one](https://github.com/sasha-s/go-csync). + +## Grabbing an RLock twice from the same goroutine +This is, surprisingly, not a good idea! + +From [RWMutex](https://golang.org/pkg/sync/#RWMutex) docs: + +>If a goroutine holds a RWMutex for reading and another goroutine might call Lock, no goroutine should expect to be able to acquire a read lock until the initial read lock is released. In particular, this prohibits recursive read locking. This is to ensure that the lock eventually becomes available; a blocked Lock call excludes new readers from acquiring the lock. 
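A minimal sketch of the same recursive read-lock hazard using go-deadlock's drop-in RWMutex, assuming the default Opts defined in the vendored deadlock.go below (this is an illustration, not part of the upstream README):

```go
package main

import "github.com/sasha-s/go-deadlock"

func main() {
	var rw deadlock.RWMutex

	rw.RLock()
	// Taking the same read lock again from the same goroutine looks harmless,
	// but with sync.RWMutex it deadlocks as soon as a writer is waiting in
	// between. go-deadlock flags this as "Recursive locking" and, with the
	// default Opts.OnPotentialDeadlock, prints a report and exits with
	// status 2 before the second RLock is even attempted.
	rw.RLock()

	rw.RUnlock()
	rw.RUnlock()
}
```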
+ + +The following code will deadlock — [run the example on playground](https://play.golang.org/p/AkL-W63nq5f) or [try it online with go-deadlock on wandbox](https://wandbox.org/permlink/JwnL0GMySBju4SII): +```go +package main + +import ( + "fmt" + "sync" +) + +func main() { + var mu sync.RWMutex + + chrlockTwice := make(chan struct{}) // Used to control rlockTwice + rlockTwice := func() { + mu.RLock() + fmt.Println("first Rlock succeeded") + <-chrlockTwice + <-chrlockTwice + fmt.Println("trying to Rlock again") + mu.RLock() + fmt.Println("second Rlock succeeded") + mu.RUnlock() + mu.RUnlock() + } + + chLock := make(chan struct{}) // Used to contol lock + lock := func() { + <-chLock + fmt.Println("about to Lock") + mu.Lock() + fmt.Println("Lock succeeded") + mu.Unlock() + <-chLock + } + + control := func() { + chrlockTwice <- struct{}{} + chLock <- struct{}{} + + close(chrlockTwice) + close(chLock) + } + + go control() + go lock() + rlockTwice() +} +``` +## Configuring go-deadlock + +Have a look at [Opts](https://pkg.go.dev/github.com/sasha-s/go-deadlock#pkg-variables). + +* `Opts.Disable`: disables deadlock detection altogether +* `Opts.DisableLockOrderDetection`: disables lock order based deadlock detection. +* `Opts.DeadlockTimeout`: blocking on mutex for longer than DeadlockTimeout is considered a deadlock. ignored if negative +* `Opts.OnPotentialDeadlock`: callback for then deadlock is detected +* `Opts.MaxMapSize`: size of happens before // happens after table +* `Opts.PrintAllCurrentGoroutines`: dump stacktraces of all goroutines when inconsistent locking is detected, verbose +* `Opts.LogBuf`: where to write deadlock info/stacktraces + + diff --git a/vendor/github.com/sasha-s/go-deadlock/deadlock.go b/vendor/github.com/sasha-s/go-deadlock/deadlock.go new file mode 100644 index 000000000000..a285c751db0b --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/deadlock.go @@ -0,0 +1,387 @@ +package deadlock + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "sync" + "time" + + "github.com/petermattis/goid" +) + +// Opts control how deadlock detection behaves. +// Options are supposed to be set once at a startup (say, when parsing flags). +var Opts = struct { + // Mutex/RWMutex would work exactly as their sync counterparts + // -- almost no runtime penalty, no deadlock detection if Disable == true. + Disable bool + // Would disable lock order based deadlock detection if DisableLockOrderDetection == true. + DisableLockOrderDetection bool + // Waiting for a lock for longer than DeadlockTimeout is considered a deadlock. + // Ignored if DeadlockTimeout <= 0. + DeadlockTimeout time.Duration + // OnPotentialDeadlock is called each time a potential deadlock is detected -- either based on + // lock order or on lock wait time. + OnPotentialDeadlock func() + // Will keep MaxMapSize lock pairs (happens before // happens after) in the map. + // The map resets once the threshold is reached. + MaxMapSize int + // Will dump stacktraces of all goroutines when inconsistent locking is detected. + PrintAllCurrentGoroutines bool + mu *sync.Mutex // Protects the LogBuf. + // Will print deadlock info to log buffer. 
+ LogBuf io.Writer +}{ + DeadlockTimeout: time.Second * 30, + OnPotentialDeadlock: func() { + os.Exit(2) + }, + MaxMapSize: 1024 * 64, + mu: &sync.Mutex{}, + LogBuf: os.Stderr, +} + +// Cond is sync.Cond wrapper +type Cond struct { + sync.Cond +} + +// Locker is sync.Locker wrapper +type Locker struct { + sync.Locker +} + +// Once is sync.Once wrapper +type Once struct { + sync.Once +} + +// Pool is sync.Poll wrapper +type Pool struct { + sync.Pool +} + +// WaitGroup is sync.WaitGroup wrapper +type WaitGroup struct { + sync.WaitGroup +} + +// NewCond is a sync.NewCond wrapper +var NewCond = sync.NewCond + +// A Mutex is a drop-in replacement for sync.Mutex. +// Performs deadlock detection unless disabled in Opts. +type Mutex struct { + mu sync.Mutex +} + +// Lock locks the mutex. +// If the lock is already in use, the calling goroutine +// blocks until the mutex is available. +// +// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf, +// calling Opts.OnPotentialDeadlock on each occasion. +func (m *Mutex) Lock() { + lock(m.mu.Lock, m) +} + +// Unlock unlocks the mutex. +// It is a run-time error if m is not locked on entry to Unlock. +// +// A locked Mutex is not associated with a particular goroutine. +// It is allowed for one goroutine to lock a Mutex and then +// arrange for another goroutine to unlock it. +func (m *Mutex) Unlock() { + m.mu.Unlock() + if !Opts.Disable { + postUnlock(m) + } +} + +// An RWMutex is a drop-in replacement for sync.RWMutex. +// Performs deadlock detection unless disabled in Opts. +type RWMutex struct { + mu sync.RWMutex +} + +// Lock locks rw for writing. +// If the lock is already locked for reading or writing, +// Lock blocks until the lock is available. +// To ensure that the lock eventually becomes available, +// a blocked Lock call excludes new readers from acquiring +// the lock. +// +// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf, +// calling Opts.OnPotentialDeadlock on each occasion. +func (m *RWMutex) Lock() { + lock(m.mu.Lock, m) +} + +// Unlock unlocks the mutex for writing. It is a run-time error if rw is +// not locked for writing on entry to Unlock. +// +// As with Mutexes, a locked RWMutex is not associated with a particular +// goroutine. One goroutine may RLock (Lock) an RWMutex and then +// arrange for another goroutine to RUnlock (Unlock) it. +func (m *RWMutex) Unlock() { + m.mu.Unlock() + if !Opts.Disable { + postUnlock(m) + } +} + +// RLock locks the mutex for reading. +// +// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf, +// calling Opts.OnPotentialDeadlock on each occasion. +func (m *RWMutex) RLock() { + lock(m.mu.RLock, m) +} + +// RUnlock undoes a single RLock call; +// it does not affect other simultaneous readers. +// It is a run-time error if rw is not locked for reading +// on entry to RUnlock. +func (m *RWMutex) RUnlock() { + m.mu.RUnlock() + if !Opts.Disable { + postUnlock(m) + } +} + +// RLocker returns a Locker interface that implements +// the Lock and Unlock methods by calling RLock and RUnlock. 
+func (m *RWMutex) RLocker() sync.Locker { + return (*rlocker)(m) +} + +func preLock(stack []uintptr, p interface{}) { + lo.preLock(stack, p) +} + +func postLock(stack []uintptr, p interface{}) { + lo.postLock(stack, p) +} + +func postUnlock(p interface{}) { + lo.postUnlock(p) +} + +func lock(lockFn func(), ptr interface{}) { + if Opts.Disable { + lockFn() + return + } + stack := callers(1) + preLock(stack, ptr) + if Opts.DeadlockTimeout <= 0 { + lockFn() + } else { + ch := make(chan struct{}) + currentID := goid.Get() + go checkDeadlock(stack, ptr, currentID, ch) + lockFn() + postLock(stack, ptr) + close(ch) + return + } + postLock(stack, ptr) +} + +var timersPool sync.Pool + +func acquireTimer(d time.Duration) *time.Timer { + t, ok := timersPool.Get().(*time.Timer) + if ok { + _ = t.Reset(d) + return t + } + return time.NewTimer(Opts.DeadlockTimeout) +} + +func releaseTimer(t *time.Timer) { + if !t.Stop() { + <-t.C + } + timersPool.Put(t) +} + +func checkDeadlock(stack []uintptr, ptr interface{}, currentID int64, ch <-chan struct{}) { + t := acquireTimer(Opts.DeadlockTimeout) + defer releaseTimer(t) + for { + select { + case <-t.C: + lo.mu.Lock() + prev, ok := lo.cur[ptr] + if !ok { + lo.mu.Unlock() + break // Nobody seems to be holding the lock, try again. + } + Opts.mu.Lock() + fmt.Fprintln(Opts.LogBuf, header) + fmt.Fprintln(Opts.LogBuf, "Previous place where the lock was grabbed") + fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", prev.gid, ptr) + printStack(Opts.LogBuf, prev.stack) + fmt.Fprintln(Opts.LogBuf, "Have been trying to lock it again for more than", Opts.DeadlockTimeout) + fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", currentID, ptr) + printStack(Opts.LogBuf, stack) + stacks := stacks() + grs := bytes.Split(stacks, []byte("\n\n")) + for _, g := range grs { + if goid.ExtractGID(g) == prev.gid { + fmt.Fprintln(Opts.LogBuf, "Here is what goroutine", prev.gid, "doing now") + Opts.LogBuf.Write(g) + fmt.Fprintln(Opts.LogBuf) + } + } + lo.other(ptr) + if Opts.PrintAllCurrentGoroutines { + fmt.Fprintln(Opts.LogBuf, "All current goroutines:") + Opts.LogBuf.Write(stacks) + } + fmt.Fprintln(Opts.LogBuf) + if buf, ok := Opts.LogBuf.(*bufio.Writer); ok { + buf.Flush() + } + Opts.mu.Unlock() + lo.mu.Unlock() + Opts.OnPotentialDeadlock() + <-ch + return + case <-ch: + return + } + t.Reset(Opts.DeadlockTimeout) + } +} + +type lockOrder struct { + mu sync.Mutex + cur map[interface{}]stackGID // stacktraces + gids for the locks currently taken. + order map[beforeAfter]ss // expected order of locks. 
+} + +type stackGID struct { + stack []uintptr + gid int64 +} + +type beforeAfter struct { + before interface{} + after interface{} +} + +type ss struct { + before []uintptr + after []uintptr +} + +var lo = newLockOrder() + +func newLockOrder() *lockOrder { + return &lockOrder{ + cur: map[interface{}]stackGID{}, + order: map[beforeAfter]ss{}, + } +} + +func (l *lockOrder) postLock(stack []uintptr, p interface{}) { + gid := goid.Get() + l.mu.Lock() + l.cur[p] = stackGID{stack, gid} + l.mu.Unlock() +} + +func (l *lockOrder) preLock(stack []uintptr, p interface{}) { + if Opts.DisableLockOrderDetection { + return + } + gid := goid.Get() + l.mu.Lock() + for b, bs := range l.cur { + if b == p { + if bs.gid == gid { + Opts.mu.Lock() + fmt.Fprintln(Opts.LogBuf, header, "Recursive locking:") + fmt.Fprintf(Opts.LogBuf, "current goroutine %d lock %p\n", gid, b) + printStack(Opts.LogBuf, stack) + fmt.Fprintln(Opts.LogBuf, "Previous place where the lock was grabbed (same goroutine)") + printStack(Opts.LogBuf, bs.stack) + l.other(p) + if buf, ok := Opts.LogBuf.(*bufio.Writer); ok { + buf.Flush() + } + Opts.mu.Unlock() + Opts.OnPotentialDeadlock() + } + continue + } + if bs.gid != gid { // We want locks taken in the same goroutine only. + continue + } + if s, ok := l.order[beforeAfter{p, b}]; ok { + Opts.mu.Lock() + fmt.Fprintln(Opts.LogBuf, header, "Inconsistent locking. saw this ordering in one goroutine:") + fmt.Fprintln(Opts.LogBuf, "happened before") + printStack(Opts.LogBuf, s.before) + fmt.Fprintln(Opts.LogBuf, "happened after") + printStack(Opts.LogBuf, s.after) + fmt.Fprintln(Opts.LogBuf, "in another goroutine: happened before") + printStack(Opts.LogBuf, bs.stack) + fmt.Fprintln(Opts.LogBuf, "happened after") + printStack(Opts.LogBuf, stack) + l.other(p) + fmt.Fprintln(Opts.LogBuf) + if buf, ok := Opts.LogBuf.(*bufio.Writer); ok { + buf.Flush() + } + Opts.mu.Unlock() + Opts.OnPotentialDeadlock() + } + l.order[beforeAfter{b, p}] = ss{bs.stack, stack} + if len(l.order) == Opts.MaxMapSize { // Reset the map to keep memory footprint bounded. + l.order = map[beforeAfter]ss{} + } + } + l.mu.Unlock() +} + +func (l *lockOrder) postUnlock(p interface{}) { + l.mu.Lock() + delete(l.cur, p) + l.mu.Unlock() +} + +type rlocker RWMutex + +func (r *rlocker) Lock() { (*RWMutex)(r).RLock() } +func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() } + +// Under lo.mu Locked. 
+func (l *lockOrder) other(ptr interface{}) { + empty := true + for k := range l.cur { + if k == ptr { + continue + } + empty = false + } + if empty { + return + } + fmt.Fprintln(Opts.LogBuf, "Other goroutines holding locks:") + for k, pp := range l.cur { + if k == ptr { + continue + } + fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", pp.gid, k) + printStack(Opts.LogBuf, pp.stack) + } + fmt.Fprintln(Opts.LogBuf) +} + +const header = "POTENTIAL DEADLOCK:" diff --git a/vendor/github.com/sasha-s/go-deadlock/deadlock_map.go b/vendor/github.com/sasha-s/go-deadlock/deadlock_map.go new file mode 100644 index 000000000000..ec66bdc0f409 --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/deadlock_map.go @@ -0,0 +1,10 @@ +// +build go1.9 + +package deadlock + +import "sync" + +// Map is sync.Map wrapper +type Map struct { + sync.Map +} diff --git a/vendor/github.com/sasha-s/go-deadlock/stacktraces.go b/vendor/github.com/sasha-s/go-deadlock/stacktraces.go new file mode 100644 index 000000000000..d93050fcde5b --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/stacktraces.go @@ -0,0 +1,107 @@ +package deadlock + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "sync" +) + +func callers(skip int) []uintptr { + s := make([]uintptr, 50) // Most relevant context seem to appear near the top of the stack. + return s[:runtime.Callers(2+skip, s)] +} + +func printStack(w io.Writer, stack []uintptr) { + home := os.Getenv("HOME") + usr, err := user.Current() + if err == nil { + home = usr.HomeDir + } + cwd, _ := os.Getwd() + + for i, pc := range stack { + f := runtime.FuncForPC(pc) + name := f.Name() + pkg := "" + if pos := strings.LastIndex(name, "/"); pos >= 0 { + name = name[pos+1:] + } + if pos := strings.Index(name, "."); pos >= 0 { + pkg = name[:pos] + name = name[pos+1:] + } + file, line := f.FileLine(pc) + if (pkg == "runtime" && name == "goexit") || (pkg == "testing" && name == "tRunner") { + fmt.Fprintln(w) + return + } + tail := "" + if i == 0 { + tail = " <<<<<" // Make the line performing a lock prominent. + } + // Shorten the file name. + clean := file + if cwd != "" { + cl, err := filepath.Rel(cwd, file) + if err == nil { + clean = cl + } + } + if home != "" { + s2 := strings.Replace(file, home, "~", 1) + if len(clean) > len(s2) { + clean = s2 + } + } + fmt.Fprintf(w, "%s:%d %s.%s %s%s\n", clean, line-1, pkg, name, code(file, line), tail) + } + fmt.Fprintln(w) +} + +var fileSources struct { + sync.Mutex + lines map[string][][]byte +} + +// Reads souce file lines from disk if not cached already. +func getSourceLines(file string) [][]byte { + fileSources.Lock() + defer fileSources.Unlock() + if fileSources.lines == nil { + fileSources.lines = map[string][][]byte{} + } + if lines, ok := fileSources.lines[file]; ok { + return lines + } + text, _ := ioutil.ReadFile(file) + fileSources.lines[file] = bytes.Split(text, []byte{'\n'}) + return fileSources.lines[file] +} + +func code(file string, line int) string { + lines := getSourceLines(file) + line -= 2 + if line >= len(lines) || line < 0 { + return "???" + } + return "{ " + string(bytes.TrimSpace(lines[line])) + " }" +} + +// Stacktraces for all goroutines. 
+func stacks() []byte { + buf := make([]byte, 1024*16) + for { + n := runtime.Stack(buf, true) + if n < len(buf) { + return buf[:n] + } + buf = make([]byte, 2*len(buf)) + } +} diff --git a/vendor/github.com/sasha-s/go-deadlock/test.sh b/vendor/github.com/sasha-s/go-deadlock/test.sh new file mode 100644 index 000000000000..9c9da85cd9da --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./...); do + go test -bench=. -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/sasha-s/go-deadlock/trylock.go b/vendor/github.com/sasha-s/go-deadlock/trylock.go new file mode 100644 index 000000000000..e8a6775b413c --- /dev/null +++ b/vendor/github.com/sasha-s/go-deadlock/trylock.go @@ -0,0 +1,39 @@ +// +build go1.18 + +package deadlock + +// TryLock tries to lock the mutex. +// Returns false if the lock is already in use, true otherwise. +func (m *Mutex) TryLock() bool { + return trylock(m.mu.TryLock, m) +} + +// TryLock tries to lock rw for writing. +// Returns false if the lock is already locked for reading or writing, true otherwise. +func (m *RWMutex) TryLock() bool { + return trylock(m.mu.TryLock, m) +} + +// TryRLock tries to lock rw for reading. +// Returns false if the lock is already locked for writing, true otherwise. +func (m *RWMutex) TryRLock() bool { + return trylock(m.mu.TryRLock, m) +} + +// trylock can not deadlock, so there is no deadlock detection. +// lock ordering is still supported by calling into preLock/postLock, +// and in failed attempt into postUnlock to unroll the state added by preLock. +func trylock(lockFn func() bool, ptr interface{}) bool { + if Opts.Disable { + return lockFn() + } + stack := callers(1) + preLock(stack, ptr) + ret := lockFn() + if ret { + postLock(stack, ptr) + } else { + postUnlock(ptr) + } + return ret +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ab04210c5eaa..ac5052f07022 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -394,7 +394,7 @@ github.com/containerd/fifo # github.com/containerd/fuse-overlayfs-snapshotter/v2 v2.1.1 ## explicit; go 1.22.0 github.com/containerd/fuse-overlayfs-snapshotter/v2 -# github.com/containerd/go-cni v1.1.11 +# github.com/containerd/go-cni v1.1.12 ## explicit; go 1.21 github.com/containerd/go-cni # github.com/containerd/go-runc v1.1.0 @@ -704,6 +704,9 @@ github.com/package-url/packageurl-go # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml +# github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 +## explicit; go 1.17 +github.com/petermattis/goid # github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c ## explicit; go 1.14 github.com/pkg/browser @@ -753,6 +756,9 @@ github.com/prometheus/procfs/internal/util # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/sasha-s/go-deadlock v0.3.5 +## explicit +github.com/sasha-s/go-deadlock # github.com/secure-systems-lab/go-securesystemslib v0.4.0 ## explicit; go 1.17 github.com/secure-systems-lab/go-securesystemslib/cjson From 800f840798ddc2c32329fd1bd9263c05bec9ac92 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 15 Jan 2025 13:13:03 +0100 Subject: [PATCH 3/5] vendor: stargz snapshotter 1281fc2cd2ea42c23b252bdb06e527e163bfea63 Signed-off-by: Sebastiaan van Stijn --- go.mod | 24 +-- go.sum | 48 ++--- 
.../stargz-snapshotter/cache/cache.go | 41 +++- .../stargz-snapshotter/fs/config/config.go | 3 + .../containerd/stargz-snapshotter/fs/fs.go | 20 +- .../stargz-snapshotter/fs/layer/layer.go | 7 +- .../stargz-snapshotter/fs/layer/node.go | 39 +++- .../stargz-snapshotter/fs/layer/testutil.go | 22 +- .../stargz-snapshotter/fs/reader/reader.go | 152 ++++++++++++- .../stargz-snapshotter/fs/remote/resolver.go | 8 +- .../stargz-snapshotter/snapshot/snapshot.go | 26 +++ vendor/github.com/hanwen/go-fuse/v2/fs/api.go | 8 + .../github.com/hanwen/go-fuse/v2/fs/bridge.go | 4 +- .../hanwen/go-fuse/v2/fs/bridge_linux.go | 60 ++++++ .../hanwen/go-fuse/v2/fs/bridge_nonlinux.go | 9 + .../hanwen/go-fuse/v2/fs/files_linux.go | 27 +++ .../hanwen/go-fuse/v2/fs/loopback_linux.go | 24 +++ .../github.com/hanwen/go-fuse/v2/fuse/api.go | 7 + .../hanwen/go-fuse/v2/fuse/attr_linux.go | 24 +++ .../hanwen/go-fuse/v2/fuse/bufferpool.go | 20 +- .../hanwen/go-fuse/v2/fuse/defaultraw.go | 4 + .../hanwen/go-fuse/v2/fuse/opcode.go | 93 ++++---- .../hanwen/go-fuse/v2/fuse/opcode_linux.go | 26 +++ .../github.com/hanwen/go-fuse/v2/fuse/poll.go | 2 +- .../hanwen/go-fuse/v2/fuse/print.go | 6 + .../hanwen/go-fuse/v2/fuse/print_linux.go | 60 ++++++ .../hanwen/go-fuse/v2/fuse/protocol-server.go | 133 ++++++++++++ .../hanwen/go-fuse/v2/fuse/request.go | 17 +- .../hanwen/go-fuse/v2/fuse/request_linux.go | 2 +- .../hanwen/go-fuse/v2/fuse/server.go | 204 ++++-------------- .../hanwen/go-fuse/v2/fuse/server_linux.go | 2 +- .../hanwen/go-fuse/v2/fuse/server_unix.go | 2 +- .../hanwen/go-fuse/v2/fuse/splice_linux.go | 2 +- .../hanwen/go-fuse/v2/fuse/types.go | 67 ++++++ .../hanwen/go-fuse/v2/fuse/types_linux.go | 7 + .../vbatts/tar-split/archive/tar/format.go | 4 + .../vbatts/tar-split/archive/tar/reader.go | 14 +- vendor/go.opentelemetry.io/otel/.gitignore | 8 - vendor/go.opentelemetry.io/otel/.golangci.yml | 2 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 49 ++++- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 18 +- vendor/go.opentelemetry.io/otel/Makefile | 2 +- .../otel/internal/global/instruments.go | 14 +- .../otel/internal/global/meter.go | 69 +++++- .../otel/internal/global/trace.go | 8 +- .../otel/sdk/instrumentation/scope.go | 4 + .../otel/sdk/metric/config.go | 72 +++++-- .../otel/sdk/metric/exemplar.go | 61 +++--- .../otel/sdk/metric/exemplar/filter.go | 5 + .../metric/exemplar/fixed_size_reservoir.go | 7 + .../metric/exemplar/histogram_reservoir.go | 12 +- .../otel/sdk/metric/exemplar/reservoir.go | 8 + .../otel/sdk/metric/instrument.go | 6 + .../metric/internal/aggregate/aggregate.go | 8 +- .../sdk/metric/internal/aggregate/drop.go | 7 +- .../sdk/metric/internal/aggregate/exemplar.go | 1 + .../aggregate/exponential_histogram.go | 6 +- .../metric/internal/aggregate/histogram.go | 8 +- .../metric/internal/aggregate/lastvalue.go | 8 +- .../otel/sdk/metric/internal/aggregate/sum.go | 10 +- .../otel/sdk/metric/manual_reader.go | 9 +- .../otel/sdk/metric/meter.go | 105 ++++----- .../otel/sdk/metric/periodic_reader.go | 9 +- .../otel/sdk/metric/pipeline.go | 132 ++++++------ .../otel/sdk/metric/provider.go | 10 +- .../otel/sdk/metric/version.go | 2 +- .../otel/sdk/metric/view.go | 11 +- .../otel/sdk/resource/auto.go | 62 ++---- .../otel/sdk/resource/builtin.go | 6 +- .../otel/sdk/trace/batch_span_processor.go | 1 + .../otel/sdk/trace/provider.go | 9 +- .../otel/sdk/trace/span.go | 5 +- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 
15 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 27 ++- .../grpc/balancer_wrapper.go | 73 ++++++- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- .../google.golang.org/grpc/credentials/tls.go | 6 +- vendor/google.golang.org/grpc/dialoptions.go | 5 + .../grpc/health/grpc_health_v1/health.pb.go | 2 +- .../google.golang.org/grpc/health/producer.go | 106 +++++++++ .../grpc/internal/envconfig/envconfig.go | 2 +- .../grpc/internal/envconfig/xds.go | 6 + .../grpc/internal/internal.go | 4 + .../grpc/internal/transport/handler_server.go | 2 +- .../grpc/internal/transport/http2_server.go | 4 +- .../grpc_reflection_v1/reflection.pb.go | 2 +- .../grpc_reflection_v1alpha/reflection.pb.go | 2 +- vendor/google.golang.org/grpc/server.go | 10 +- .../google.golang.org/grpc/service_config.go | 17 +- vendor/google.golang.org/grpc/stream.go | 2 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 24 +-- 94 files changed, 1634 insertions(+), 655 deletions(-) create mode 100644 vendor/github.com/hanwen/go-fuse/v2/fs/bridge_linux.go create mode 100644 vendor/github.com/hanwen/go-fuse/v2/fs/bridge_nonlinux.go create mode 100644 vendor/github.com/hanwen/go-fuse/v2/fuse/opcode_linux.go create mode 100644 vendor/github.com/hanwen/go-fuse/v2/fuse/protocol-server.go create mode 100644 vendor/google.golang.org/grpc/health/producer.go diff --git a/go.mod b/go.mod index a05a77623fdc..a266840187bd 100644 --- a/go.mod +++ b/go.mod @@ -25,8 +25,8 @@ require ( github.com/containerd/log v0.1.0 github.com/containerd/nydus-snapshotter v0.15.0 github.com/containerd/platforms v1.0.0-rc.1 - github.com/containerd/stargz-snapshotter v0.16.3 - github.com/containerd/stargz-snapshotter/estargz v0.16.3 + github.com/containerd/stargz-snapshotter v0.16.2-0.20250126124854-1281fc2cd2ea + github.com/containerd/stargz-snapshotter/estargz v0.16.2-0.20250126124854-1281fc2cd2ea github.com/containerd/typeurl/v2 v2.2.3 github.com/containernetworking/plugins v1.5.1 github.com/coreos/go-systemd/v22 v22.5.0 @@ -83,7 +83,7 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 - go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 @@ -91,9 +91,9 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 go.opentelemetry.io/otel/exporters/prometheus v0.42.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/sdk/metric v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/sdk/metric v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 go.opentelemetry.io/proto/otlp v1.3.1 golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 @@ -102,8 +102,8 @@ require ( golang.org/x/sync v0.10.0 golang.org/x/sys v0.29.0 golang.org/x/time v0.6.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 - google.golang.org/grpc v1.69.4 + google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a + google.golang.org/grpc v1.70.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf v1.35.2 
kernel.org/pub/linux/libs/security/libcap/cap v1.2.73 @@ -155,7 +155,7 @@ require ( github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect - github.com/hanwen/go-fuse/v2 v2.6.3 // indirect + github.com/hanwen/go-fuse/v2 v2.7.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -174,12 +174,12 @@ require ( github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/vbatts/tar-split v0.11.6 // indirect + github.com/vbatts/tar-split v0.11.7 // indirect github.com/vishvananda/netns v0.0.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a // indirect gopkg.in/yaml.v3 v3.0.1 // indirect kernel.org/pub/linux/libs/security/libcap/psx v1.2.73 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index 307c0a547daf..d4ae0e2e8030 100644 --- a/go.sum +++ b/go.sum @@ -115,10 +115,10 @@ github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsW github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y= github.com/containerd/plugin v1.0.0/go.mod h1:hQfJe5nmWfImiqT1q8Si3jLv3ynMUIBB47bQ+KexvO8= -github.com/containerd/stargz-snapshotter v0.16.3 h1:zbQMm8dRuPHEOD4OqAYGajJJUwCeUzt4j7w9Iaw58u4= -github.com/containerd/stargz-snapshotter v0.16.3/go.mod h1:XPOl2oa9zjWidTM2IX191smolwWc3/zkKtp02TzTFb0= -github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= -github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/stargz-snapshotter v0.16.2-0.20250126124854-1281fc2cd2ea h1:/zz7qevn5j4cOV1y2tDMp2/Kym0g0h+A9qrtLRuidHM= +github.com/containerd/stargz-snapshotter v0.16.2-0.20250126124854-1281fc2cd2ea/go.mod h1:MH+QfxSkJJ3qvLKLCpg686qmtQnoi0Jl2ICFR9pHOUI= +github.com/containerd/stargz-snapshotter/estargz v0.16.2-0.20250126124854-1281fc2cd2ea h1:5myRplbVgzXsssYUf3DjOXslZR7AaSNYHpen0GLQZYQ= +github.com/containerd/stargz-snapshotter/estargz v0.16.2-0.20250126124854-1281fc2cd2ea/go.mod h1:YgLRxRPPqA9ccxTLauFOLzYH1T6q+nZS675kZM4dAfk= github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= @@ -219,8 +219,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= -github.com/hanwen/go-fuse/v2 v2.6.3 
h1:tDcEkLRx93lXu4XyN1/j8Z74VWvhHDl6qU1kNnvFUqI= -github.com/hanwen/go-fuse/v2 v2.6.3/go.mod h1:ugNaD/iv5JYyS1Rcvi57Wz7/vrLQJo10mmketmoef48= +github.com/hanwen/go-fuse/v2 v2.7.2 h1:SbJP1sUP+n1UF8NXBA14BuojmTez+mDgOk0bC057HQw= +github.com/hanwen/go-fuse/v2 v2.7.2/go.mod h1:ugNaD/iv5JYyS1Rcvi57Wz7/vrLQJo10mmketmoef48= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -402,8 +402,8 @@ github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ= github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po= -github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= -github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U= +github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 h1:w5OI+kArIBVksl8UGn6ARQshtPCQvDsbuA9NQie3GIg= github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= @@ -427,8 +427,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0. go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08= @@ -443,14 +443,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod 
h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -536,17 +536,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a h1:OAiGFfOiA0v9MRYsSidp3ubZaBnteRUyn3xB2ZQ5G/E= +google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.70.0 
h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go b/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go index 417ae4b2b822..b1aa16cb55d2 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go +++ b/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go @@ -18,6 +18,7 @@ package cache import ( "bytes" + "errors" "fmt" "io" "os" @@ -26,7 +27,6 @@ import ( "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/namedmutex" - "github.com/hashicorp/go-multierror" ) const ( @@ -82,6 +82,9 @@ type BlobCache interface { type Reader interface { io.ReaderAt Close() error + + // If a blob is backed by a file, it should return *os.File so that it can be used for FUSE passthrough + GetReaderAt() io.ReaderAt } // Writer enables the client to cache byte data. Commit() must be @@ -94,7 +97,8 @@ type Writer interface { } type cacheOpt struct { - direct bool + direct bool + passThrough bool } type Option func(o *cacheOpt) *cacheOpt @@ -110,6 +114,15 @@ func Direct() Option { } } +// PassThrough option indicates whether to enable FUSE passthrough mode +// to improve local file read performance. +func PassThrough() Option { + return func(o *cacheOpt) *cacheOpt { + o.passThrough = true + return o + } +} + func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache, error) { if !filepath.IsAbs(directory) { return nil, fmt.Errorf("dir cache path must be an absolute path; got %q", directory) @@ -229,8 +242,16 @@ func (dc *directoryCache) Get(key string, opts ...Option) (Reader, error) { // that won't be accessed immediately. if dc.direct || opt.direct { return &reader{ - ReaderAt: file, - closeFunc: func() error { return file.Close() }, + ReaderAt: file, + closeFunc: func() error { + // In passthough model, close will be toke over by go-fuse + // If "passThrough" option is specified, "direct" option also will + // be specified, so adding this branch here is enough + if opt.passThrough { + return nil + } + return file.Close() + }, }, nil } @@ -273,12 +294,12 @@ func (dc *directoryCache) Add(key string, opts ...Option) (Writer, error) { // Commit the cache contents c := dc.cachePath(key) if err := os.MkdirAll(filepath.Dir(c), os.ModePerm); err != nil { - var allErr error + var errs []error if err := os.Remove(wip.Name()); err != nil { - allErr = multierror.Append(allErr, err) + errs = append(errs, err) } - return multierror.Append(allErr, - fmt.Errorf("failed to create cache directory %q: %w", c, err)) + errs = append(errs, fmt.Errorf("failed to create cache directory %q: %w", c, err)) + return errors.Join(errs...) 
} return os.Rename(wip.Name(), c) }, @@ -414,6 +435,10 @@ type reader struct { func (r *reader) Close() error { return r.closeFunc() } +func (r *reader) GetReaderAt() io.ReaderAt { + return r.ReaderAt +} + type writer struct { io.WriteCloser commitFunc func() error diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go b/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go index 890aded7400b..44b87e8d6c24 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go @@ -148,4 +148,7 @@ type FuseConfig struct { // EntryTimeout defines TTL for directory, name lookup in seconds. EntryTimeout int64 `toml:"entry_timeout"` + + // PassThrough indicates whether to enable FUSE passthrough mode to improve local file read performance. Default is false. + PassThrough bool `toml:"passthrough" default:"false"` } diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go b/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go index 47191e99ac6d..4547ae32ec7d 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go @@ -72,6 +72,12 @@ const ( ) var fusermountBin = []string{"fusermount", "fusermount3"} +var ( + nsLock = sync.Mutex{} + + ns *metrics.Namespace + metricsCtr *layermetrics.Controller +) type Option func(*options) @@ -160,18 +166,20 @@ func NewFilesystem(root string, cfg config.Config, opts ...Option) (_ snapshot.F return nil, fmt.Errorf("failed to setup resolver: %w", err) } - var ns *metrics.Namespace - if !cfg.NoPrometheus { + nsLock.Lock() + defer nsLock.Unlock() + + if !cfg.NoPrometheus && ns == nil { ns = metrics.NewNamespace("stargz", "fs", nil) logLevel := log.DebugLevel if fsOpts.metricsLogLevel != nil { logLevel = *fsOpts.metricsLogLevel } commonmetrics.Register(logLevel) // Register common metrics. This will happen only once. + metrics.Register(ns) // Register layer metrics. } - c := layermetrics.NewLayerMetrics(ns) - if ns != nil { - metrics.Register(ns) // Register layer metrics. + if metricsCtr == nil { + metricsCtr = layermetrics.NewLayerMetrics(ns) } return &filesystem{ @@ -185,7 +193,7 @@ func NewFilesystem(root string, cfg config.Config, opts ...Option) (_ snapshot.F backgroundTaskManager: tm, allowNoVerification: cfg.AllowNoVerification, disableVerification: cfg.DisableVerification, - metricsController: c, + metricsController: metricsCtr, attrTimeout: attrTimeout, entryTimeout: entryTimeout, }, nil diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go index a71848dbbc37..4efc32b24765 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go @@ -315,7 +315,7 @@ func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refs } // Combine layer information together and cache it. 
- l := newLayer(r, desc, blobR, vr) + l := newLayer(r, desc, blobR, vr, r.config.FuseConfig.PassThrough) r.layerCacheMu.Lock() cachedL, done2, added := r.layerCache.Add(name, l) r.layerCacheMu.Unlock() @@ -375,6 +375,7 @@ func newLayer( desc ocispec.Descriptor, blob *blobRef, vr *reader.VerifiableReader, + pth bool, ) *layer { return &layer{ resolver: resolver, @@ -382,6 +383,7 @@ func newLayer( blob: blob, verifiableReader: vr, prefetchWaiter: newWaiter(), + passThrough: pth, } } @@ -402,6 +404,7 @@ type layer struct { prefetchOnce sync.Once backgroundFetchOnce sync.Once + passThrough bool } func (l *layer) Info() Info { @@ -583,7 +586,7 @@ func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) { if l.r == nil { return nil, fmt.Errorf("layer hasn't been verified yet") } - return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType) + return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType, l.passThrough) } func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go index b6306b9b5955..36d342529b56 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go @@ -76,7 +76,7 @@ var opaqueXattrs = map[OverlayOpaqueType][]string{ OverlayOpaqueUser: {"user.overlay.opaque"}, } -func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseInode uint32, opaque OverlayOpaqueType) (fusefs.InodeEmbedder, error) { +func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseInode uint32, opaque OverlayOpaqueType, pth bool) (fusefs.InodeEmbedder, error) { rootID := r.Metadata().RootID() rootAttr, err := r.Metadata().GetAttr(rootID) if err != nil { @@ -92,6 +92,7 @@ func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseIno baseInode: baseInode, rootID: rootID, opaqueXattrs: opq, + passThrough: pth, } ffs.s = ffs.newState(layerDgst, blob) return &node{ @@ -109,6 +110,7 @@ type fs struct { baseInode uint32 rootID uint32 opaqueXattrs []string + passThrough bool } func (fs *fs) inodeOfState() uint64 { @@ -344,10 +346,26 @@ func (n *node) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fu n.fs.s.report(fmt.Errorf("node.Open: %v", err)) return nil, 0, syscall.EIO } - return &file{ + + f := &file{ n: n, ra: ra, - }, fuse.FOPEN_KEEP_CACHE, 0 + fd: -1, + } + + if n.fs.passThrough { + if getter, ok := ra.(reader.PassthroughFdGetter); ok { + fd, err := getter.GetPassthroughFd() + if err != nil { + n.fs.s.report(fmt.Errorf("passThrough model failed due to node.Open: %v", err)) + n.fs.passThrough = false + } else { + f.InitFd(int(fd)) + } + } + } + + return f, fuse.FOPEN_KEEP_CACHE, 0 } var _ = (fusefs.NodeGetattrer)((*node)(nil)) @@ -424,6 +442,7 @@ func (n *node) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno { type file struct { n *node ra io.ReaderAt + fd int } var _ = (fusefs.FileReader)((*file)(nil)) @@ -451,6 +470,20 @@ func (f *file) Getattr(ctx context.Context, out *fuse.AttrOut) syscall.Errno { return 0 } +// Implement PassthroughFd to enable go-fuse passthrough +var _ = (fusefs.FilePassthroughFder)((*file)(nil)) + +func (f *file) PassthroughFd() (int, bool) { + if f.fd <= 0 { + return -1, false + } + return f.fd, true +} + +func (f *file) InitFd(fd int) { + f.fd = fd +} + // whiteout is a whiteout 
abstraction compliant to overlayfs. type whiteout struct { fusefs.Inode diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go index 5a38825d004d..3729340c42b4 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go @@ -74,6 +74,7 @@ var srcCompressions = map[string]tutil.CompressionFactory{ func TestSuiteLayer(t *testing.T, store metadata.Store) { testPrefetch(t, store) testNodeRead(t, store) + testPassThroughRead(t, store) testNodes(t, store) } @@ -218,6 +219,7 @@ func testPrefetch(t *testing.T, factory metadata.Store) { ocispec.Descriptor{Digest: testStateLayerDigest}, &blobRef{blob, func() {}}, vr, + false, ) if err := l.Verify(dgst); err != nil { t.Errorf("failed to verify reader: %v", err) @@ -379,7 +381,15 @@ const ( lastChunkOffset1 = sampleChunkSize * (int64(len(sampleData1)) / sampleChunkSize) ) +func testPassThroughRead(t *testing.T, factory metadata.Store) { + nodeRead(t, factory, true) +} + func testNodeRead(t *testing.T, factory metadata.Store) { + nodeRead(t, factory, false) +} + +func nodeRead(t *testing.T, factory metadata.Store, pth bool) { sizeCond := map[string]int64{ "single_chunk": sampleChunkSize - sampleMiddleOffset, "multi_chunks": sampleChunkSize + sampleMiddleOffset, @@ -428,7 +438,7 @@ func testNodeRead(t *testing.T, factory metadata.Store) { } // data we get from the file node. - f, closeFn := makeNodeReader(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory, cl) + f, closeFn := makeNodeReader(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory, cl, pth) defer closeFn() tmpbuf := make([]byte, size) // fuse library can request bigger than remain rr, errno := f.Read(context.Background(), tmpbuf, offset) @@ -459,7 +469,7 @@ func testNodeRead(t *testing.T, factory metadata.Store) { } } -func makeNodeReader(t *testing.T, contents []byte, chunkSize int, factory metadata.Store, cl tutil.Compression) (_ *file, closeFn func() error) { +func makeNodeReader(t *testing.T, contents []byte, chunkSize int, factory metadata.Store, cl tutil.Compression, pth bool) (_ *file, closeFn func() error) { testName := "test" sr, tocDgst, err := tutil.BuildEStargz( []tutil.TarEntry{tutil.File(testName, string(contents))}, @@ -472,7 +482,7 @@ func makeNodeReader(t *testing.T, contents []byte, chunkSize int, factory metada if err != nil { t.Fatalf("failed to create reader: %v", err) } - rootNode := getRootNode(t, r, OverlayOpaqueAll, tocDgst, cache.NewMemoryCache()) + rootNode := getRootNode(t, r, OverlayOpaqueAll, tocDgst, cache.NewMemoryCache(), pth) var eo fuse.EntryOut inode, errno := rootNode.Lookup(context.Background(), testName, &eo) if errno != 0 { @@ -724,7 +734,7 @@ func testNodesWithOpaque(t *testing.T, factory metadata.Store, opaque OverlayOpa } defer r.Close() mcache := cache.NewMemoryCache() - rootNode := getRootNode(t, r, opaque, tocDgst, mcache) + rootNode := getRootNode(t, r, opaque, tocDgst, mcache, false) for _, want := range tt.want { want(t, rootNode, mcache, testR) } @@ -733,7 +743,7 @@ func testNodesWithOpaque(t *testing.T, factory metadata.Store, opaque OverlayOpa } } -func getRootNode(t *testing.T, r metadata.Reader, opaque OverlayOpaqueType, tocDgst digest.Digest, cc cache.BlobCache) *node { +func getRootNode(t *testing.T, r metadata.Reader, opaque OverlayOpaqueType, tocDgst digest.Digest, cc cache.BlobCache, pth bool) *node { vr, err := 
reader.NewReader(r, cc, digest.FromString("")) if err != nil { t.Fatalf("failed to create reader: %v", err) @@ -742,7 +752,7 @@ func getRootNode(t *testing.T, r metadata.Reader, opaque OverlayOpaqueType, tocD if err != nil { t.Fatalf("failed to verify reader: %v", err) } - rootNode, err := newNode(testStateLayerDigest, rr, &testBlobState{10, 5}, 100, opaque) + rootNode, err := newNode(testStateLayerDigest, rr, &testBlobState{10, 5}, 100, opaque, pth) if err != nil { t.Fatalf("failed to get root node: %v", err) } diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go index c860b87dec4e..06c744c66c81 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go @@ -27,6 +27,7 @@ import ( "bytes" "context" "crypto/sha256" + "errors" "fmt" "io" "os" @@ -38,7 +39,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz" commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common" "github.com/containerd/stargz-snapshotter/metadata" - "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" @@ -53,6 +53,10 @@ type Reader interface { LastOnDemandReadTime() time.Time } +type PassthroughFdGetter interface { + GetPassthroughFd() (uintptr, error) +} + // VerifiableReader produces a Reader with a given verifier. type VerifiableReader struct { r *reader @@ -386,20 +390,21 @@ func (gr *reader) OpenFile(id uint32) (io.ReaderAt, error) { }, nil } -func (gr *reader) Close() (retErr error) { +func (gr *reader) Close() error { gr.closedMu.Lock() defer gr.closedMu.Unlock() if gr.closed { return nil } gr.closed = true + var errs []error if err := gr.cache.Close(); err != nil { - retErr = multierror.Append(retErr, err) + errs = append(errs, err) } if err := gr.r.Close(); err != nil { - retErr = multierror.Append(retErr, err) + errs = append(errs, err) } - return + return errors.Join(errs...) 
} func (gr *reader) isClosed() bool { @@ -490,18 +495,137 @@ func (sf *file) ReadAt(p []byte, offset int64) (int, error) { return nr, nil } -func (gr *reader) verifyAndCache(entryID uint32, ip []byte, chunkDigestStr string, cacheID string) error { +func (sf *file) GetPassthroughFd() (uintptr, error) { + var ( + offset int64 + firstChunkOffset int64 = -1 + totalSize int64 + ) + + for { + chunkOffset, chunkSize, _, ok := sf.fr.ChunkEntryForOffset(offset) + if !ok { + break + } + if firstChunkOffset == -1 { + firstChunkOffset = chunkOffset + } + totalSize += chunkSize + offset = chunkOffset + chunkSize + } + + id := genID(sf.id, firstChunkOffset, totalSize) + + // cache.PassThrough() is necessary to take over files + r, err := sf.gr.cache.Get(id, cache.PassThrough()) + if err != nil { + if err := sf.prefetchEntireFile(id); err != nil { + return 0, err + } + + // just retry once to avoid exception stuck + r, err = sf.gr.cache.Get(id, cache.PassThrough()) + if err != nil { + return 0, err + } + } + + readerAt := r.GetReaderAt() + file, ok := readerAt.(*os.File) + if !ok { + r.Close() + return 0, fmt.Errorf("the cached ReaderAt is not of type *os.File, fd obtain failed") + } + + fd := file.Fd() + r.Close() + return fd, nil +} + +func (sf *file) prefetchEntireFile(entireCacheID string) error { + var ( + offset int64 + firstChunkOffset int64 = -1 + totalSize int64 + ) + + w, err := sf.gr.cache.Add(entireCacheID) + if err != nil { + return fmt.Errorf("failed to create cache writer: %w", err) + } + defer w.Close() + + for { + chunkOffset, chunkSize, chunkDigestStr, ok := sf.fr.ChunkEntryForOffset(offset) + if !ok { + break + } + if firstChunkOffset == -1 { + firstChunkOffset = chunkOffset + } + + id := genID(sf.id, chunkOffset, chunkSize) + b := sf.gr.bufPool.Get().(*bytes.Buffer) + b.Reset() + b.Grow(int(chunkSize)) + ip := b.Bytes()[:chunkSize] + + // Check if the content exists in the cache + // Just read it and merge to a new files, so cache.PassThrough() should not be used here + if r, err := sf.gr.cache.Get(id); err == nil { + n, err := r.ReadAt(ip, 0) + if (err == nil || err == io.EOF) && int64(n) == chunkSize { + if _, err := w.Write(ip[:n]); err != nil { + r.Close() + sf.gr.putBuffer(b) + w.Abort() + return fmt.Errorf("failed to write cached data: %w", err) + } + totalSize += int64(n) + offset = chunkOffset + int64(n) + r.Close() + sf.gr.putBuffer(b) + continue + } + r.Close() + } + + // cache miss, prefetch the whole chunk + if _, err := sf.fr.ReadAt(ip, chunkOffset); err != nil && err != io.EOF { + sf.gr.putBuffer(b) + w.Abort() + return fmt.Errorf("failed to read data: %w", err) + } + if err := sf.gr.verifyOneChunk(sf.id, ip, chunkDigestStr); err != nil { + sf.gr.putBuffer(b) + w.Abort() + return err + } + if _, err := w.Write(ip); err != nil { + sf.gr.putBuffer(b) + w.Abort() + return fmt.Errorf("failed to write fetched data: %w", err) + } + totalSize += chunkSize + offset = chunkOffset + chunkSize + sf.gr.putBuffer(b) + } + + return w.Commit() +} + +func (gr *reader) verifyOneChunk(entryID uint32, ip []byte, chunkDigestStr string) error { // We can end up doing on demand registry fetch when aligning the chunk - commonmetrics.IncOperationCount(commonmetrics.OnDemandRemoteRegistryFetchCount, gr.layerSha) // increment the number of on demand file fetches from remote registry - commonmetrics.AddBytesCount(commonmetrics.OnDemandBytesFetched, gr.layerSha, int64(len(ip))) // record total bytes fetched + commonmetrics.IncOperationCount(commonmetrics.OnDemandRemoteRegistryFetchCount, 
gr.layerSha) + commonmetrics.AddBytesCount(commonmetrics.OnDemandBytesFetched, gr.layerSha, int64(len(ip))) gr.setLastReadTime(time.Now()) - - // Verify this chunk if err := gr.verifyChunk(entryID, ip, chunkDigestStr); err != nil { return fmt.Errorf("invalid chunk: %w", err) } + return nil +} - // Cache this chunk +func (gr *reader) cacheData(ip []byte, cacheID string) { if w, err := gr.cache.Add(cacheID); err == nil { if cn, err := w.Write(ip); err != nil || cn != len(ip) { w.Abort() @@ -510,7 +634,13 @@ func (gr *reader) verifyAndCache(entryID uint32, ip []byte, chunkDigestStr strin } w.Close() } +} +func (gr *reader) verifyAndCache(entryID uint32, ip []byte, chunkDigestStr string, cacheID string) error { + if err := gr.verifyOneChunk(entryID, ip, chunkDigestStr); err != nil { + return err + } + gr.cacheData(ip, cacheID) return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go index 46c28d374429..274ae343ab59 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go @@ -26,6 +26,7 @@ import ( "context" "crypto/rand" "crypto/sha256" + "errors" "fmt" "io" "math/big" @@ -46,7 +47,6 @@ import ( "github.com/containerd/stargz-snapshotter/fs/config" commonmetrics "github.com/containerd/stargz-snapshotter/fs/metrics/common" "github.com/containerd/stargz-snapshotter/fs/source" - "github.com/hashicorp/go-multierror" rhttp "github.com/hashicorp/go-retryablehttp" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -129,12 +129,12 @@ func (r *Resolver) resolveFetcher(ctx context.Context, hosts source.RegistryHost minWaitMSec: time.Duration(blobConfig.MinWaitMSec) * time.Millisecond, maxWaitMSec: time.Duration(blobConfig.MaxWaitMSec) * time.Millisecond, } - var handlersErr error + var errs []error for name, p := range r.handlers { // TODO: allow to configure the selection of readers based on the hostname in refspec r, size, err := p.Handle(ctx, desc) if err != nil { - handlersErr = multierror.Append(handlersErr, err) + errs = append(errs, err) continue } log.G(ctx).WithField("handler name", name).WithField("ref", refspec.String()).WithField("digest", desc.Digest). @@ -142,6 +142,8 @@ func (r *Resolver) resolveFetcher(ctx context.Context, hosts source.RegistryHost return &remoteFetcher{r}, size, nil } + handlersErr := errors.Join(errs...) 
+ log.G(ctx).WithError(handlersErr).WithField("ref", refspec.String()).WithField("digest", desc.Digest).Debugf("using default handler") hf, size, err := newHTTPFetcher(ctx, fc) if err != nil { diff --git a/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go b/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go index eac479870068..03dbeca07315 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go +++ b/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go @@ -73,6 +73,7 @@ type SnapshotterConfig struct { asyncRemove bool noRestore bool allowInvalidMountsOnRestart bool + detach bool } // Opt is an option to configure the remote snapshotter @@ -97,6 +98,11 @@ func AllowInvalidMountsOnRestart(config *SnapshotterConfig) error { return nil } +func SetDetachFlag(config *SnapshotterConfig) error { + config.detach = true + return nil +} + type snapshotter struct { root string ms *storage.MetaStore @@ -107,6 +113,7 @@ type snapshotter struct { userxattr bool // whether to enable "userxattr" mount option noRestore bool allowInvalidMountsOnRestart bool + detach bool } // NewSnapshotter returns a Snapshotter which can use unpacked remote layers @@ -157,6 +164,7 @@ func NewSnapshotter(ctx context.Context, root string, targetFs FileSystem, opts userxattr: userxattr, noRestore: config.noRestore, allowInvalidMountsOnRestart: config.allowInvalidMountsOnRestart, + detach: config.detach, } if err := o.restoreRemoteSnapshot(ctx); err != nil { @@ -425,6 +433,15 @@ func (o *snapshotter) cleanup(ctx context.Context, cleanupCommitted bool) error return nil } +func (o *snapshotter) cleanupMetadataDB() error { + metadataDBPath := filepath.Join(o.root, "metadata.db") + err := os.Remove(metadataDBPath) + if err != nil { + return err + } + return nil +} + func (o *snapshotter) cleanupDirectories(ctx context.Context, cleanupCommitted bool) ([]string, error) { // Get a write transaction to ensure no other write transaction can be entered // while the cleanup is scanning. @@ -658,6 +675,11 @@ func (o *snapshotter) Close() error { if err := o.cleanup(ctx, cleanupCommitted); err != nil { log.G(ctx).WithError(err).Warn("failed to cleanup") } + + if err := o.cleanupMetadataDB(); err != nil { + log.G(ctx).WithError(err).Warn("failed to cleanup metadata.db") + } + return o.ms.Close() } @@ -722,6 +744,10 @@ func (o *snapshotter) checkAvailability(ctx context.Context, key string) bool { } func (o *snapshotter) restoreRemoteSnapshot(ctx context.Context) error { + if o.detach { + return nil + } + mounts, err := mountinfo.GetMounts(nil) if err != nil { return err diff --git a/vendor/github.com/hanwen/go-fuse/v2/fs/api.go b/vendor/github.com/hanwen/go-fuse/v2/fs/api.go index bc93e4eb2822..d089125f2754 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fs/api.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fs/api.go @@ -402,6 +402,10 @@ type NodeCopyFileRanger interface { // Ugh. should have been called Copyfilerange } +type NodeStatxer interface { + Statx(ctx context.Context, f FileHandle, flags uint32, mask uint32, out *fuse.StatxOut) syscall.Errno +} + // Lseek is used to implement holes: it should return the // first offset beyond `off` where there is data (SEEK_DATA) // or where there is a hole (SEEK_HOLE). 
@@ -604,6 +608,10 @@ type FileGetattrer interface { Getattr(ctx context.Context, out *fuse.AttrOut) syscall.Errno } +type FileStatxer interface { + Statx(ctx context.Context, flags uint32, mask uint32, out *fuse.StatxOut) syscall.Errno +} + // See NodeReader. type FileReader interface { Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) diff --git a/vendor/github.com/hanwen/go-fuse/v2/fs/bridge.go b/vendor/github.com/hanwen/go-fuse/v2/fs/bridge.go index 06edd8b0febf..c0dd97079d32 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fs/bridge.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fs/bridge.go @@ -484,7 +484,9 @@ func (b *rawBridge) Create(cancel <-chan struct{}, input *fuse.CreateIn, name st } child, fe := b.addNewChild(parent, name, child, f, input.Flags|syscall.O_CREAT|syscall.O_EXCL, &out.EntryOut) - out.Fh = uint64(fe.fh) + if fe != nil { + out.Fh = uint64(fe.fh) + } out.OpenFlags = flags b.addBackingID(child, f, &out.OpenOut) diff --git a/vendor/github.com/hanwen/go-fuse/v2/fs/bridge_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fs/bridge_linux.go new file mode 100644 index 000000000000..ba07cb412e72 --- /dev/null +++ b/vendor/github.com/hanwen/go-fuse/v2/fs/bridge_linux.go @@ -0,0 +1,60 @@ +package fs + +import ( + "syscall" + + "github.com/hanwen/go-fuse/v2/fuse" +) + +// see rawBridge.setAttr +func (b *rawBridge) setStatx(out *fuse.Statx) { + if !b.options.NullPermissions && out.Mode&07777 == 0 { + out.Mode |= 0644 + if out.Mode&syscall.S_IFDIR != 0 { + out.Mode |= 0111 + } + } + if b.options.UID != 0 && out.Uid == 0 { + out.Uid = b.options.UID + } + if b.options.GID != 0 && out.Gid == 0 { + out.Gid = b.options.GID + } + setStatxBlocks(out) +} + +// see rawBridge.setAttrTimeout +func (b *rawBridge) setStatxTimeout(out *fuse.StatxOut) { + if b.options.AttrTimeout != nil && out.Timeout() == 0 { + out.SetTimeout(*b.options.AttrTimeout) + } +} + +func (b *rawBridge) Statx(cancel <-chan struct{}, in *fuse.StatxIn, out *fuse.StatxOut) fuse.Status { + n, fe := b.inode(in.NodeId, in.Fh) + var fh FileHandle + if fe != nil { + fh = fe.file + } + + ctx := &fuse.Context{Caller: in.Caller, Cancel: cancel} + + errno := syscall.ENOSYS + if sx, ok := n.ops.(NodeStatxer); ok { + errno = sx.Statx(ctx, fh, in.SxFlags, in.SxMask, out) + } else if fsx, ok := n.ops.(FileStatxer); ok { + errno = fsx.Statx(ctx, in.SxFlags, in.SxMask, out) + } + + if errno == 0 { + if out.Ino != 0 && n.stableAttr.Ino > 1 && out.Ino != n.stableAttr.Ino { + b.logf("warning: rawBridge.getattr: overriding ino %d with %d", out.Ino, n.stableAttr.Ino) + } + out.Ino = n.stableAttr.Ino + out.Mode = (out.Statx.Mode & 07777) | uint16(n.stableAttr.Mode) + b.setStatx(&out.Statx) + b.setStatxTimeout(out) + } + + return errnoToStatus(errno) +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fs/bridge_nonlinux.go b/vendor/github.com/hanwen/go-fuse/v2/fs/bridge_nonlinux.go new file mode 100644 index 000000000000..e41e7d956b2a --- /dev/null +++ b/vendor/github.com/hanwen/go-fuse/v2/fs/bridge_nonlinux.go @@ -0,0 +1,9 @@ +//go:build !linux + +package fs + +import "github.com/hanwen/go-fuse/v2/fuse" + +func (b *rawBridge) Statx(cancel <-chan struct{}, in *fuse.StatxIn, out *fuse.StatxOut) fuse.Status { + return fuse.ENOSYS +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fs/files_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fs/files_linux.go index 3eea61ca06b1..a0c8f397e591 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fs/files_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fs/files_linux.go @@ 
-5,7 +5,11 @@ package fs import ( + "context" + "syscall" + "github.com/hanwen/go-fuse/v2/fuse" + "golang.org/x/sys/unix" ) func setBlocks(out *fuse.Attr) { @@ -17,3 +21,26 @@ func setBlocks(out *fuse.Attr) { pages := (out.Size + 4095) / 4096 out.Blocks = pages * 8 } + +func setStatxBlocks(out *fuse.Statx) { + if out.Blksize > 0 { + return + } + + out.Blksize = 4096 + pages := (out.Size + 4095) / 4096 + out.Blocks = pages * 8 +} + +func (f *loopbackFile) Statx(ctx context.Context, flags uint32, mask uint32, out *fuse.StatxOut) syscall.Errno { + f.mu.Lock() + defer f.mu.Unlock() + st := unix.Statx_t{} + err := unix.Statx(f.fd, "", int(flags), int(mask), &st) + if err != nil { + return ToErrno(err) + } + out.FromStatx(&st) + + return OK +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fs/loopback_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fs/loopback_linux.go index 08b99eb824d1..298a3fb6eca1 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fs/loopback_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fs/loopback_linux.go @@ -8,8 +8,10 @@ package fs import ( + "context" "syscall" + "github.com/hanwen/go-fuse/v2/fuse" "golang.org/x/sys/unix" ) @@ -24,3 +26,25 @@ func doCopyFileRange(fdIn int, offIn int64, fdOut int, offOut int64, func intDev(dev uint32) int { return int(dev) } + +var _ = (NodeStatxer)((*LoopbackNode)(nil)) + +func (n *LoopbackNode) Statx(ctx context.Context, f FileHandle, + flags uint32, mask uint32, + out *fuse.StatxOut) syscall.Errno { + if f != nil { + if fga, ok := f.(FileStatxer); ok { + return fga.Statx(ctx, flags, mask, out) + } + } + + p := n.path() + + st := unix.Statx_t{} + err := unix.Statx(unix.AT_FDCWD, p, int(flags), int(mask), &st) + if err != nil { + return ToErrno(err) + } + out.FromStatx(&st) + return OK +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/api.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/api.go index 13a3fd7feb9e..d335435ea78b 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/api.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/api.go @@ -329,6 +329,12 @@ type MountOptions struct { // directory queries (i.e. 'ls' without '-l') can be faster with // ReadDir, as no per-file stat calls are needed DisableReadDirPlus bool + + // Disable splicing from files to the FUSE device. + DisableSplice bool + + // Maximum stacking depth for passthrough files. Defaults to 1. + MaxStackDepth int } // RawFileSystem is an interface close to the FUSE wire protocol. @@ -439,6 +445,7 @@ type RawFileSystem interface { StatFs(cancel <-chan struct{}, input *InHeader, out *StatfsOut) (code Status) + Statx(cancel <-chan struct{}, input *StatxIn, out *StatxOut) (code Status) // This is called on processing the first request. The // filesystem implementation can use the server argument to // talk back to the kernel (through notify methods). 
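For reference, a minimal usage sketch for the two new MountOptions fields introduced above (DisableSplice and MaxStackDepth). This is an illustrative assumption, not part of the vendored patch: the loopback root, mount paths, and the chosen values are hypothetical, while fs.NewLoopbackRoot, fs.Mount, and fs.Options are existing go-fuse v2 APIs.

package main

import (
	"log"

	"github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"
)

func main() {
	// Serve a local directory through a loopback filesystem (paths are illustrative).
	root, err := fs.NewLoopbackRoot("/tmp/src")
	if err != nil {
		log.Fatal(err)
	}

	opts := &fs.Options{
		MountOptions: fuse.MountOptions{
			// New in this go-fuse bump: skip splice(2) when copying file data
			// to the FUSE device, and raise the passthrough stacking depth
			// advertised to the kernel (default is 1).
			DisableSplice: true,
			MaxStackDepth: 2,
		},
	}

	srv, err := fs.Mount("/tmp/mnt", root, opts)
	if err != nil {
		log.Fatal(err)
	}
	srv.Wait()
}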
diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/attr_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/attr_linux.go index 223676a9500e..afcf217208ca 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/attr_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/attr_linux.go @@ -6,6 +6,8 @@ package fuse import ( "syscall" + + "golang.org/x/sys/unix" ) func (a *Attr) FromStat(s *syscall.Stat_t) { @@ -25,3 +27,25 @@ func (a *Attr) FromStat(s *syscall.Stat_t) { a.Rdev = uint32(s.Rdev) a.Blksize = uint32(s.Blksize) } + +func (a *Statx) FromStatx(s *unix.Statx_t) { + a.Ino = uint64(s.Ino) + a.Size = uint64(s.Size) + a.Blocks = uint64(s.Blocks) + a.Atime.FromStatxTimestamp(&s.Atime) + a.Btime.FromStatxTimestamp(&s.Btime) + a.Ctime.FromStatxTimestamp(&s.Ctime) + a.Mtime.FromStatxTimestamp(&s.Mtime) + a.Mode = s.Mode + a.Nlink = uint32(s.Nlink) + a.Uid = uint32(s.Uid) + a.Gid = uint32(s.Gid) + a.Blksize = uint32(s.Blksize) + a.AttributesMask = s.Attributes_mask + a.Mask = s.Mask + a.Attributes = s.Attributes + a.RdevMinor = s.Rdev_minor + a.RdevMajor = s.Rdev_major + a.DevMajor = s.Dev_major + a.DevMinor = s.Dev_minor +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/bufferpool.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/bufferpool.go index f419ec84682c..6da37ffb9fca 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/bufferpool.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/bufferpool.go @@ -16,21 +16,35 @@ type bufferPool struct { // For each page size multiple a list of slice pointers. buffersBySize []*sync.Pool + + // Number of outstanding allocations. Used for testing. + countersBySize []int } var pageSize = os.Getpagesize() -func (p *bufferPool) getPool(pageCount int) *sync.Pool { +func (p *bufferPool) counters() []int { + p.lock.Lock() + defer p.lock.Unlock() + + d := make([]int, len(p.countersBySize)) + copy(d, p.countersBySize) + return d +} + +func (p *bufferPool) getPool(pageCount int, delta int) *sync.Pool { p.lock.Lock() defer p.lock.Unlock() for len(p.buffersBySize) <= pageCount { p.buffersBySize = append(p.buffersBySize, nil) + p.countersBySize = append(p.countersBySize, 0) } if p.buffersBySize[pageCount] == nil { p.buffersBySize[pageCount] = &sync.Pool{ New: func() interface{} { return make([]byte, pageSize*pageCount) }, } } + p.countersBySize[pageCount] += delta return p.buffersBySize[pageCount] } @@ -47,7 +61,7 @@ func (p *bufferPool) AllocBuffer(size uint32) []byte { } pages := sz / pageSize - b := p.getPool(pages).Get().([]byte) + b := p.getPool(pages, 1).Get().([]byte) return b[:size] } @@ -64,5 +78,5 @@ func (p *bufferPool) FreeBuffer(slice []byte) { pages := cap(slice) / pageSize slice = slice[:cap(slice)] - p.getPool(pages).Put(slice) + p.getPool(pages, -1).Put(slice) } diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/defaultraw.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/defaultraw.go index df109bdcf037..dd2dd1a72f26 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/defaultraw.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/defaultraw.go @@ -166,3 +166,7 @@ func (fs *defaultRawFileSystem) CopyFileRange(cancel <-chan struct{}, input *Cop func (fs *defaultRawFileSystem) Lseek(cancel <-chan struct{}, in *LseekIn, out *LseekOut) Status { return ENOSYS } + +func (fs *defaultRawFileSystem) Statx(cancel <-chan struct{}, input *StatxIn, out *StatxOut) (code Status) { + return ENOSYS +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/opcode.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/opcode.go index 39d562485951..c7a66b8cffc3 100644 --- 
a/vendor/github.com/hanwen/go-fuse/v2/fuse/opcode.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/opcode.go @@ -84,7 +84,7 @@ const ( //////////////////////////////////////////////////////////////// -func doInit(server *Server, req *request) { +func doInit(server *protocolServer, req *request) { input := (*InitIn)(req.inData()) if input.Major != _FUSE_KERNEL_VERSION { log.Printf("Major versions does not match. Given %d, want %d\n", input.Major, _FUSE_KERNEL_VERSION) @@ -98,7 +98,6 @@ func doInit(server *Server, req *request) { } kernelFlags := input.Flags64() - server.kernelSettings = *input kernelFlags &= (CAP_ASYNC_READ | CAP_BIG_WRITES | CAP_FILE_OPS | CAP_READDIRPLUS | CAP_NO_OPEN_SUPPORT | CAP_PARALLEL_DIROPS | CAP_MAX_PAGES | CAP_RENAME_SWAP | CAP_PASSTHROUGH) @@ -147,7 +146,7 @@ func doInit(server *Server, req *request) { CongestionThreshold: uint16(server.opts.MaxBackground * 3 / 4), MaxBackground: uint16(server.opts.MaxBackground), MaxPages: uint16(maxPages), - MaxStackDepth: 1, + MaxStackDepth: uint32(server.opts.MaxStackDepth), } out.setFlags(kernelFlags) if server.opts.MaxReadAhead != 0 && uint32(server.opts.MaxReadAhead) < out.MaxReadAhead { @@ -160,7 +159,7 @@ func doInit(server *Server, req *request) { req.status = OK } -func doOpen(server *Server, req *request) { +func doOpen(server *protocolServer, req *request) { out := (*OpenOut)(req.outData()) status := server.fileSystem.Open(req.cancel, (*OpenIn)(req.inData()), out) req.status = status @@ -169,13 +168,13 @@ func doOpen(server *Server, req *request) { } } -func doCreate(server *Server, req *request) { +func doCreate(server *protocolServer, req *request) { out := (*CreateOut)(req.outData()) status := server.fileSystem.Create(req.cancel, (*CreateIn)(req.inData()), req.filename(), out) req.status = status } -func doReadDir(server *Server, req *request) { +func doReadDir(server *protocolServer, req *request) { in := (*ReadIn)(req.inData()) out := NewDirEntryList(req.outPayload, uint64(in.Offset)) code := server.fileSystem.ReadDir(req.cancel, in, out) @@ -183,7 +182,7 @@ func doReadDir(server *Server, req *request) { req.status = code } -func doReadDirPlus(server *Server, req *request) { +func doReadDirPlus(server *protocolServer, req *request) { in := (*ReadIn)(req.inData()) out := NewDirEntryList(req.outPayload, uint64(in.Offset)) @@ -192,25 +191,25 @@ func doReadDirPlus(server *Server, req *request) { req.status = code } -func doOpenDir(server *Server, req *request) { +func doOpenDir(server *protocolServer, req *request) { out := (*OpenOut)(req.outData()) status := server.fileSystem.OpenDir(req.cancel, (*OpenIn)(req.inData()), out) req.status = status } -func doSetattr(server *Server, req *request) { +func doSetattr(server *protocolServer, req *request) { out := (*AttrOut)(req.outData()) req.status = server.fileSystem.SetAttr(req.cancel, (*SetAttrIn)(req.inData()), out) } -func doWrite(server *Server, req *request) { +func doWrite(server *protocolServer, req *request) { n, status := server.fileSystem.Write(req.cancel, (*WriteIn)(req.inData()), req.inPayload) o := (*WriteOut)(req.outData()) o.Size = n req.status = status } -func doNotifyReply(server *Server, req *request) { +func doNotifyReply(server *protocolServer, req *request) { reply := (*NotifyRetrieveIn)(req.inData()) server.retrieveMu.Lock() reading := server.retrieveTab[reply.Unique] @@ -252,7 +251,7 @@ const _SECURITY_CAPABILITY = "security.capability" const _SECURITY_ACL = "system.posix_acl_access" const _SECURITY_ACL_DEFAULT = "system.posix_acl_default" -func 
doGetXAttr(server *Server, req *request) { +func doGetXAttr(server *protocolServer, req *request) { if server.opts.DisableXAttrs { req.status = ENOSYS return @@ -296,21 +295,21 @@ func doGetXAttr(server *Server, req *request) { } } -func doGetAttr(server *Server, req *request) { +func doGetAttr(server *protocolServer, req *request) { out := (*AttrOut)(req.outData()) s := server.fileSystem.GetAttr(req.cancel, (*GetAttrIn)(req.inData()), out) req.status = s } // doForget - forget one NodeId -func doForget(server *Server, req *request) { +func doForget(server *protocolServer, req *request) { if !server.opts.RememberInodes { server.fileSystem.Forget(req.inHeader().NodeId, (*ForgetIn)(req.inData()).Nlookup) } } // doBatchForget - forget a list of NodeIds -func doBatchForget(server *Server, req *request) { +func doBatchForget(server *protocolServer, req *request) { in := (*_BatchForgetIn)(req.inData()) wantBytes := uintptr(in.Count) * unsafe.Sizeof(_ForgetOne{}) if uintptr(len(req.inPayload)) < wantBytes { @@ -332,40 +331,40 @@ func doBatchForget(server *Server, req *request) { } } -func doReadlink(server *Server, req *request) { +func doReadlink(server *protocolServer, req *request) { req.outPayload, req.status = server.fileSystem.Readlink(req.cancel, req.inHeader()) } -func doLookup(server *Server, req *request) { +func doLookup(server *protocolServer, req *request) { out := (*EntryOut)(req.outData()) req.status = server.fileSystem.Lookup(req.cancel, req.inHeader(), req.filename(), out) } -func doMknod(server *Server, req *request) { +func doMknod(server *protocolServer, req *request) { out := (*EntryOut)(req.outData()) req.status = server.fileSystem.Mknod(req.cancel, (*MknodIn)(req.inData()), req.filename(), out) } -func doMkdir(server *Server, req *request) { +func doMkdir(server *protocolServer, req *request) { out := (*EntryOut)(req.outData()) req.status = server.fileSystem.Mkdir(req.cancel, (*MkdirIn)(req.inData()), req.filename(), out) } -func doUnlink(server *Server, req *request) { +func doUnlink(server *protocolServer, req *request) { req.status = server.fileSystem.Unlink(req.cancel, req.inHeader(), req.filename()) } -func doRmdir(server *Server, req *request) { +func doRmdir(server *protocolServer, req *request) { req.status = server.fileSystem.Rmdir(req.cancel, req.inHeader(), req.filename()) } -func doLink(server *Server, req *request) { +func doLink(server *protocolServer, req *request) { out := (*EntryOut)(req.outData()) req.status = server.fileSystem.Link(req.cancel, (*LinkIn)(req.inData()), req.filename(), out) } -func doRead(server *Server, req *request) { +func doRead(server *protocolServer, req *request) { in := (*ReadIn)(req.inData()) req.readResult, req.status = server.fileSystem.Read(req.cancel, in, req.outPayload) if fd, ok := req.readResult.(*readResultFd); ok { @@ -375,47 +374,47 @@ func doRead(server *Server, req *request) { } } -func doFlush(server *Server, req *request) { +func doFlush(server *protocolServer, req *request) { req.status = server.fileSystem.Flush(req.cancel, (*FlushIn)(req.inData())) } -func doRelease(server *Server, req *request) { +func doRelease(server *protocolServer, req *request) { server.fileSystem.Release(req.cancel, (*ReleaseIn)(req.inData())) } -func doFsync(server *Server, req *request) { +func doFsync(server *protocolServer, req *request) { req.status = server.fileSystem.Fsync(req.cancel, (*FsyncIn)(req.inData())) } -func doReleaseDir(server *Server, req *request) { +func doReleaseDir(server *protocolServer, req *request) { 
server.fileSystem.ReleaseDir((*ReleaseIn)(req.inData())) } -func doFsyncDir(server *Server, req *request) { +func doFsyncDir(server *protocolServer, req *request) { req.status = server.fileSystem.FsyncDir(req.cancel, (*FsyncIn)(req.inData())) } -func doSetXAttr(server *Server, req *request) { +func doSetXAttr(server *protocolServer, req *request) { i := bytes.IndexByte(req.inPayload, 0) req.status = server.fileSystem.SetXAttr(req.cancel, (*SetXAttrIn)(req.inData()), string(req.inPayload[:i]), req.inPayload[i+1:]) } -func doRemoveXAttr(server *Server, req *request) { +func doRemoveXAttr(server *protocolServer, req *request) { req.status = server.fileSystem.RemoveXAttr(req.cancel, req.inHeader(), req.filename()) } -func doAccess(server *Server, req *request) { +func doAccess(server *protocolServer, req *request) { req.status = server.fileSystem.Access(req.cancel, (*AccessIn)(req.inData())) } -func doSymlink(server *Server, req *request) { +func doSymlink(server *protocolServer, req *request) { out := (*EntryOut)(req.outData()) n1, n2 := req.filenames() req.status = server.fileSystem.Symlink(req.cancel, req.inHeader(), n2, n1, out) } -func doRename(server *Server, req *request) { +func doRename(server *protocolServer, req *request) { if server.kernelSettings.supportsRenameSwap() { doRename2(server, req) return @@ -429,12 +428,12 @@ func doRename(server *Server, req *request) { req.status = server.fileSystem.Rename(req.cancel, &in, n1, n2) } -func doRename2(server *Server, req *request) { +func doRename2(server *protocolServer, req *request) { n1, n2 := req.filenames() req.status = server.fileSystem.Rename(req.cancel, (*RenameIn)(req.inData()), n1, n2) } -func doStatFs(server *Server, req *request) { +func doStatFs(server *protocolServer, req *request) { out := (*StatfsOut)(req.outData()) req.status = server.fileSystem.StatFs(req.cancel, req.inHeader(), out) if req.status == ENOSYS && runtime.GOOS == "darwin" { @@ -445,51 +444,51 @@ func doStatFs(server *Server, req *request) { } } -func doIoctl(server *Server, req *request) { +func doIoctl(server *protocolServer, req *request) { req.status = Status(syscall.ENOTTY) } -func doDestroy(server *Server, req *request) { +func doDestroy(server *protocolServer, req *request) { req.status = OK } -func doFallocate(server *Server, req *request) { +func doFallocate(server *protocolServer, req *request) { req.status = server.fileSystem.Fallocate(req.cancel, (*FallocateIn)(req.inData())) } -func doGetLk(server *Server, req *request) { +func doGetLk(server *protocolServer, req *request) { req.status = server.fileSystem.GetLk(req.cancel, (*LkIn)(req.inData()), (*LkOut)(req.outData())) } -func doSetLk(server *Server, req *request) { +func doSetLk(server *protocolServer, req *request) { req.status = server.fileSystem.SetLk(req.cancel, (*LkIn)(req.inData())) } -func doSetLkw(server *Server, req *request) { +func doSetLkw(server *protocolServer, req *request) { req.status = server.fileSystem.SetLkw(req.cancel, (*LkIn)(req.inData())) } -func doLseek(server *Server, req *request) { +func doLseek(server *protocolServer, req *request) { in := (*LseekIn)(req.inData()) out := (*LseekOut)(req.outData()) req.status = server.fileSystem.Lseek(req.cancel, in, out) } -func doCopyFileRange(server *Server, req *request) { +func doCopyFileRange(server *protocolServer, req *request) { in := (*CopyFileRangeIn)(req.inData()) out := (*WriteOut)(req.outData()) out.Size, req.status = server.fileSystem.CopyFileRange(req.cancel, in) } -func doInterrupt(server *Server, req *request) { 
+func doInterrupt(server *protocolServer, req *request) { input := (*InterruptIn)(req.inData()) req.status = server.interruptRequest(input.Unique) } //////////////////////////////////////////////////////////////// -type operationFunc func(*Server, *request) +type operationFunc func(*protocolServer, *request) type castPointerFunc func(unsafe.Pointer) interface{} type operationHandler struct { @@ -742,6 +741,10 @@ func init() { operationHandlers[op].FileNames = count } + checkFixedBufferSize() +} + +func checkFixedBufferSize() { var r requestAlloc sizeOfOutHeader := unsafe.Sizeof(OutHeader{}) for code, h := range operationHandlers { diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/opcode_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/opcode_linux.go new file mode 100644 index 000000000000..204faffd7160 --- /dev/null +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/opcode_linux.go @@ -0,0 +1,26 @@ +// Copyright 2024 the Go-FUSE Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuse + +import "unsafe" + +func doStatx(server *protocolServer, req *request) { + in := (*StatxIn)(req.inData()) + out := (*StatxOut)(req.outData()) + + req.status = server.fileSystem.Statx(req.cancel, in, out) +} + +func init() { + operationHandlers[_OP_STATX] = &operationHandler{ + Name: "STATX", + Func: doStatx, + InType: StatxIn{}, + OutType: StatxOut{}, + InputSize: unsafe.Sizeof(StatxIn{}), + OutputSize: unsafe.Sizeof(StatxOut{}), + } + checkFixedBufferSize() +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/poll.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/poll.go index 7627bc9e2405..937d1abbb804 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/poll.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/poll.go @@ -8,7 +8,7 @@ package fuse const pollHackName = ".go-fuse-epoll-hack" const pollHackInode = ^uint64(0) -func doPollHackLookup(ms *Server, req *request) { +func doPollHackLookup(ms *protocolServer, req *request) { attr := Attr{ Ino: pollHackInode, Mode: S_IFREG | 0644, diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/print.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/print.go index 40949563805b..1b504ededa97 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/print.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/print.go @@ -53,6 +53,7 @@ var ( {CAP_PASSTHROUGH, "PASSTHROUGH"}, {CAP_NO_EXPORT_SUPPORT, "NO_EXPORT_SUPPORT"}, {CAP_HAS_RESEND, "HAS_RESEND"}, + {CAP_ALLOW_IDMAP, "ALLOW_IDMAP"}, }) releaseFlagNames = newFlagNames([]flagNameEntry{ {RELEASE_FLUSH, "FLUSH"}, @@ -385,3 +386,8 @@ func (a *Attr) string() string { func (m *BackingMap) string() string { return fmt.Sprintf("{fd %d, flags 0x%x}", m.Fd, m.Flags) } + +func (o *_IoctlIn) string() string { + return fmt.Sprintf("{Fh %d Flags %x Cmd %d Arg x%x, insz %d outsz %d}", + o.Fh, o.Flags, o.Cmd, o.Arg, o.InSize, o.OutSize) +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/print_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/print_linux.go index 70fb8d10f678..bdb6e25c7e06 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/print_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/print_linux.go @@ -5,11 +5,30 @@ package fuse import ( + "fmt" "runtime" "strings" "syscall" + + "golang.org/x/sys/unix" ) +var statxFieldFlags = newFlagNames([]flagNameEntry{ + {unix.STATX_ATIME, "Atime"}, + {unix.STATX_BLOCKS, "blocks"}, + {unix.STATX_BTIME, "Btime"}, + {unix.STATX_CTIME, "Ctime"}, + {unix.STATX_GID, "Gid"}, + 
{unix.STATX_INO, "Ino"}, + {unix.STATX_MNT_ID, "Mntid"}, + {unix.STATX_MODE, "Mode"}, + {unix.STATX_MTIME, "Mtime"}, + {unix.STATX_NLINK, "Nlink"}, + {unix.STATX_SIZE, "Size"}, + {unix.STATX_TYPE, "Type"}, + {unix.STATX_UID, "Uid"}, +}) + func init() { // syscall.O_LARGEFILE is 0x0 on x86_64, but the kernel // supplies 0x8000 anyway, except on mips64el, where 0x8000 is @@ -29,3 +48,44 @@ func init() { initFlagNames.set(CAP_INIT_EXT, "INIT_EXT") initFlagNames.set(CAP_INIT_RESERVED, "INIT_RESERVED") } + +func (a *Statx) string() string { + var ss []string + if a.Mask&unix.STATX_MODE != 0 || a.Mask&unix.STATX_TYPE != 0 { + ss = append(ss, fmt.Sprintf("M0%o", a.Mode)) + } + if a.Mask&unix.STATX_SIZE != 0 { + ss = append(ss, fmt.Sprintf("SZ=%d", a.Size)) + } + if a.Mask&unix.STATX_NLINK != 0 { + ss = append(ss, fmt.Sprintf("L=%d", a.Nlink)) + } + if a.Mask&unix.STATX_UID != 0 || a.Mask&unix.STATX_GID != 0 { + ss = append(ss, fmt.Sprintf("%d:%d", a.Uid, a.Gid)) + } + if a.Mask&unix.STATX_INO != 0 { + ss = append(ss, fmt.Sprintf("i%d", a.Ino)) + } + if a.Mask&unix.STATX_ATIME != 0 { + ss = append(ss, fmt.Sprintf("A %f", a.Atime.Seconds())) + } + if a.Mask&unix.STATX_BTIME != 0 { + ss = append(ss, fmt.Sprintf("B %f", a.Btime.Seconds())) + } + if a.Mask&unix.STATX_CTIME != 0 { + ss = append(ss, fmt.Sprintf("C %f", a.Ctime.Seconds())) + } + if a.Mask&unix.STATX_MTIME != 0 { + ss = append(ss, fmt.Sprintf("M %f", a.Mtime.Seconds())) + } + if a.Mask&unix.STATX_BLOCKS != 0 { + ss = append(ss, fmt.Sprintf("%d*%d", a.Blocks, a.Blksize)) + } + + return "{" + strings.Join(ss, " ") + "}" +} + +func (in *StatxIn) string() string { + return fmt.Sprintf("{Fh %d %s 0x%x %s}", in.Fh, flagString(getAttrFlagNames, int64(in.GetattrFlags), ""), + in.SxFlags, flagString(statxFieldFlags, int64(in.SxMask), "")) +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/protocol-server.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/protocol-server.go new file mode 100644 index 000000000000..d5e7a1ffa234 --- /dev/null +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/protocol-server.go @@ -0,0 +1,133 @@ +// Copyright 2024 the Go-FUSE Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuse + +import ( + "sync" +) + +// protocolServer bridges from the FUSE datatypes to a RawFileSystem +type protocolServer struct { + fileSystem RawFileSystem + + interruptMu sync.Mutex + reqInflight []*request + connectionDead bool + + latencies LatencyMap + + kernelSettings InitIn + + opts *MountOptions + + // in-flight notify-retrieve queries + retrieveMu sync.Mutex + retrieveNext uint64 + retrieveTab map[uint64]*retrieveCacheRequest // notifyUnique -> retrieve request +} + +func (ms *protocolServer) handleRequest(h *operationHandler, req *request) { + ms.addInflight(req) + defer ms.dropInflight(req) + + if req.status.Ok() && ms.opts.Debug { + ms.opts.Logger.Println(req.InputDebug()) + } + + if req.inHeader().NodeId == pollHackInode || + req.inHeader().NodeId == FUSE_ROOT_ID && h.FileNames > 0 && req.filename() == pollHackName { + doPollHackLookup(ms, req) + } else if req.status.Ok() && h.Func == nil { + ms.opts.Logger.Printf("Unimplemented opcode %v", operationName(req.inHeader().Opcode)) + req.status = ENOSYS + } else if req.status.Ok() { + h.Func(ms, req) + } + + // Forget/NotifyReply do not wait for reply from filesystem server. 
+ switch req.inHeader().Opcode { + case _OP_FORGET, _OP_BATCH_FORGET, _OP_NOTIFY_REPLY: + req.suppressReply = true + case _OP_INTERRUPT: + // ? what other status can interrupt generate? + if req.status.Ok() { + req.suppressReply = true + } + } + if req.status == EINTR { + ms.interruptMu.Lock() + dead := ms.connectionDead + ms.interruptMu.Unlock() + if dead { + req.suppressReply = true + } + } + if req.suppressReply { + return + } + if req.inHeader().Opcode == _OP_INIT && ms.kernelSettings.Minor <= 22 { + // v8-v22 don't have TimeGran and further fields. + // This includes osxfuse (a.k.a. macfuse). + req.outHeader().Length = uint32(sizeOfOutHeader) + 24 + } + if req.fdData != nil && ms.opts.DisableSplice { + req.outPayload, req.status = req.fdData.Bytes(req.outPayload) + req.fdData = nil + } + + req.serializeHeader(req.outPayloadSize()) + + if ms.opts.Debug { + ms.opts.Logger.Println(req.OutputDebug()) + } +} + +func (ms *protocolServer) addInflight(req *request) { + ms.interruptMu.Lock() + defer ms.interruptMu.Unlock() + req.inflightIndex = len(ms.reqInflight) + ms.reqInflight = append(ms.reqInflight, req) +} + +func (ms *protocolServer) dropInflight(req *request) { + ms.interruptMu.Lock() + defer ms.interruptMu.Unlock() + this := req.inflightIndex + last := len(ms.reqInflight) - 1 + if last != this { + ms.reqInflight[this] = ms.reqInflight[last] + ms.reqInflight[this].inflightIndex = this + } + ms.reqInflight = ms.reqInflight[:last] +} + +func (ms *protocolServer) interruptRequest(unique uint64) Status { + ms.interruptMu.Lock() + defer ms.interruptMu.Unlock() + + // This is slow, but this operation is rare. + for _, inflight := range ms.reqInflight { + if unique == inflight.inHeader().Unique && !inflight.interrupted { + close(inflight.cancel) + inflight.interrupted = true + return OK + } + } + + return EAGAIN +} + +func (ms *protocolServer) cancelAll() { + ms.interruptMu.Lock() + defer ms.interruptMu.Unlock() + ms.connectionDead = true + for _, req := range ms.reqInflight { + if !req.interrupted { + close(req.cancel) + req.interrupted = true + } + } + // Leave ms.reqInflight alone, or dropInflight will barf. +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/request.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/request.go index 590730944d02..1bcdd391e796 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/request.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/request.go @@ -22,6 +22,8 @@ type request struct { cancel chan struct{} + suppressReply bool + // written under Server.interruptMu interrupted bool @@ -77,7 +79,9 @@ func (r *request) outHeader() *OutHeader { return (*OutHeader)(unsafe.Pointer(&r.outputBuf[0])) } +// TODO - benchmark to see if this is necessary? 
func (r *request) clear() { + r.suppressReply = false r.inputBuf = nil r.outputBuf = nil r.inPayload = nil @@ -107,10 +111,10 @@ func (r *request) InputDebug() string { names := "" if h.FileNames == 1 { - names = fmt.Sprintf("%q", r.filename()) + names = fmt.Sprintf(" %q", r.filename()) } else if h.FileNames == 2 { n1, n2 := r.filenames() - names = fmt.Sprintf("%q %q", n1, n2) + names = fmt.Sprintf(" %q %q", n1, n2) } else if l := len(r.inPayload); l > 0 { dots := "" if l > 8 { @@ -185,7 +189,8 @@ func (r *request) inData() unsafe.Pointer { return unsafe.Pointer(&r.inputBuf[0]) } -func parseRequest(in []byte, kernelSettings *InitIn) (h *operationHandler, inSize, outSize, payloadSize int, errno Status) { +// note: outSize is without OutHeader +func parseRequest(in []byte, kernelSettings *InitIn) (h *operationHandler, inSize, outSize, outPayloadSize int, errno Status) { inSize = int(unsafe.Sizeof(InHeader{})) if len(in) < inSize { errno = EIO @@ -202,7 +207,7 @@ func parseRequest(in []byte, kernelSettings *InitIn) (h *operationHandler, inSiz if h.InputSize > 0 { inSize = int(h.InputSize) } - if hdr.Opcode == _OP_RENAME && kernelSettings.supportsRenameSwap() { + if kernelSettings != nil && hdr.Opcode == _OP_RENAME && kernelSettings.supportsRenameSwap() { inSize = int(unsafe.Sizeof(RenameIn{})) } if hdr.Opcode == _OP_INIT && inSize > len(in) { @@ -217,9 +222,9 @@ func parseRequest(in []byte, kernelSettings *InitIn) (h *operationHandler, inSiz switch hdr.Opcode { case _OP_READDIR, _OP_READDIRPLUS, _OP_READ: - payloadSize = int(((*ReadIn)(inData)).Size) + outPayloadSize = int(((*ReadIn)(inData)).Size) case _OP_GETXATTR, _OP_LISTXATTR: - payloadSize = int(((*GetXAttrIn)(inData)).Size) + outPayloadSize = int(((*GetXAttrIn)(inData)).Size) } outSize = int(h.OutputSize) diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/request_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/request_linux.go index b93eef44e8e2..10951929cfb5 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/request_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/request_linux.go @@ -4,7 +4,7 @@ package fuse -const outputHeaderSize = 160 +const outputHeaderSize = 304 const ( _FUSE_KERNEL_VERSION = 7 diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/server.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/server.go index ab0ebecbba3d..08987f227df6 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/server.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/server.go @@ -38,9 +38,10 @@ const ( // Server contains the logic for reading from the FUSE device and // translating it to RawFileSystem interface calls. type Server struct { + protocolServer + // Empty if unmounted. mountPoint string - fileSystem RawFileSystem // writeMu serializes close and notify writes writeMu sync.Mutex @@ -48,8 +49,6 @@ type Server struct { // I/O with kernel and daemon. mountFd int - latencies LatencyMap - opts *MountOptions // maxReaders is the maximum number of goroutines reading requests @@ -62,15 +61,9 @@ type Server struct { reqPool sync.Pool // Pool for raw requests data - readPool sync.Pool - reqMu sync.Mutex - reqReaders int - kernelSettings InitIn - - // in-flight notify-retrieve queries - retrieveMu sync.Mutex - retrieveNext uint64 - retrieveTab map[uint64]*retrieveCacheRequest // notifyUnique -> retrieve request + readPool sync.Pool + reqMu sync.Mutex + reqReaders int singleReader bool canSplice bool @@ -82,10 +75,6 @@ type Server struct { // for implementing single threaded processing. 
requestProcessingMu sync.Mutex - - interruptMu sync.Mutex - reqInflight []*request - connectionDead bool } // SetDebug is deprecated. Use MountOptions.Debug instead. @@ -189,7 +178,9 @@ func NewServer(fs RawFileSystem, mountPoint string, opts *MountOptions) (*Server if o.MaxWrite > MAX_KERNEL_WRITE { o.MaxWrite = MAX_KERNEL_WRITE } - + if o.MaxStackDepth == 0 { + o.MaxStackDepth = 1 + } if o.Name == "" { name := fs.String() l := len(name) @@ -207,10 +198,13 @@ func NewServer(fs RawFileSystem, mountPoint string, opts *MountOptions) (*Server } ms := &Server{ - fileSystem: fs, + protocolServer: protocolServer{ + fileSystem: fs, + retrieveTab: make(map[uint64]*retrieveCacheRequest), + opts: &o, + }, opts: &o, maxReaders: maxReaders, - retrieveTab: make(map[uint64]*retrieveCacheRequest), singleReader: useSingleReader, ready: make(chan error, 1), } @@ -543,55 +537,8 @@ exit: } } -func (ms *Server) addInflight(req *request) { - ms.interruptMu.Lock() - defer ms.interruptMu.Unlock() - req.inflightIndex = len(ms.reqInflight) - ms.reqInflight = append(ms.reqInflight, req) -} - -func (ms *Server) dropInflight(req *request) { - ms.interruptMu.Lock() - defer ms.interruptMu.Unlock() - this := req.inflightIndex - last := len(ms.reqInflight) - 1 - if last != this { - ms.reqInflight[this] = ms.reqInflight[last] - ms.reqInflight[this].inflightIndex = this - } - ms.reqInflight = ms.reqInflight[:last] -} - -func (ms *Server) interruptRequest(unique uint64) Status { - ms.interruptMu.Lock() - defer ms.interruptMu.Unlock() - - // This is slow, but this operation is rare. - for _, inflight := range ms.reqInflight { - if unique == inflight.inHeader().Unique && !inflight.interrupted { - close(inflight.cancel) - inflight.interrupted = true - return OK - } - } - - return EAGAIN -} - -func (ms *Server) cancelAll() { - ms.interruptMu.Lock() - defer ms.interruptMu.Unlock() - ms.connectionDead = true - for _, req := range ms.reqInflight { - if !req.interrupted { - close(req.cancel) - req.interrupted = true - } - } - // Leave ms.reqInflight alone, or dropInflight will barf. 
-} - func (ms *Server) handleRequest(req *requestAlloc) Status { + defer ms.returnRequest(req) if ms.opts.SingleThreaded { ms.requestProcessingMu.Lock() defer ms.requestProcessingMu.Unlock() @@ -599,6 +546,7 @@ func (ms *Server) handleRequest(req *requestAlloc) Status { h, inSize, outSize, outPayloadSize, code := parseRequest(req.inputBuf, &ms.kernelSettings) if !code.Ok() { + ms.opts.Logger.Printf("parseRequest: %v", code) return code } @@ -608,32 +556,14 @@ func (ms *Server) handleRequest(req *requestAlloc) Status { copy(req.outputBuf, zeroOutBuf[:]) if outPayloadSize > 0 { req.outPayload = ms.buffers.AllocBuffer(uint32(outPayloadSize)) + req.bufferPoolOutputBuf = req.outPayload } - code = ms.innerHandleRequest(h, &req.request) - ms.returnRequest(req) - return code -} - -func (ms *Server) innerHandleRequest(h *operationHandler, req *request) Status { - ms.addInflight(req) - defer ms.dropInflight(req) - - if req.status.Ok() && ms.opts.Debug { - ms.opts.Logger.Println(req.InputDebug()) - } - - if req.inHeader().NodeId == pollHackInode || - req.inHeader().NodeId == FUSE_ROOT_ID && h.FileNames > 0 && req.filename() == pollHackName { - doPollHackLookup(ms, req) - } else if req.status.Ok() && h.Func == nil { - ms.opts.Logger.Printf("Unimplemented opcode %v", operationName(req.inHeader().Opcode)) - req.status = ENOSYS - } else if req.status.Ok() { - h.Func(ms, req) + ms.protocolServer.handleRequest(h, &req.request) + if req.suppressReply { + return OK } - - errNo := ms.write(req) - if errNo != 0 { + errno := ms.write(&req.request) + if errno != 0 { // Ignore ENOENT for INTERRUPT responses which // indicates that the referred request is no longer // known by the kernel. This is a normal if the @@ -645,47 +575,33 @@ func (ms *Server) innerHandleRequest(h *operationHandler, req *request) Status { // RELEASE. This is because RELEASE is analogous to // FORGET, and is not synchronized with the calling // process, but does require a response. - if ms.opts.Debug || !(errNo == ENOENT && (req.inHeader().Opcode == _OP_INTERRUPT || + if ms.opts.Debug || !(errno == ENOENT && (req.inHeader().Opcode == _OP_INTERRUPT || + req.inHeader().Opcode == _OP_RELEASEDIR || req.inHeader().Opcode == _OP_RELEASE)) { ms.opts.Logger.Printf("writer: Write/Writev failed, err: %v. opcode: %v", - errNo, operationName(req.inHeader().Opcode)) + errno, operationName(req.inHeader().Opcode)) } } - return Status(errNo) + return errno } -func (ms *Server) write(req *request) Status { - // Forget/NotifyReply do not wait for reply from filesystem server. - switch req.inHeader().Opcode { - case _OP_FORGET, _OP_BATCH_FORGET, _OP_NOTIFY_REPLY: - return OK - case _OP_INTERRUPT: - if req.status.Ok() { - return OK - } - } - if req.status == EINTR { - ms.interruptMu.Lock() - dead := ms.connectionDead - ms.interruptMu.Unlock() - if dead { - return OK - } - } - - if req.inHeader().Opcode == _OP_INIT && ms.kernelSettings.Minor <= 22 { - // v8-v22 don't have TimeGran and further fields. - // This includes osxfuse (a.k.a. macfuse). - req.outHeader().Length = uint32(sizeOfOutHeader) + 24 - } +func (ms *Server) notifyWrite(req *request) Status { req.serializeHeader(req.outPayloadSize()) if ms.opts.Debug { ms.opts.Logger.Println(req.OutputDebug()) } - s := ms.systemWrite(req) - return s + // Protect against concurrent close. 
+ ms.writeMu.Lock() + result := ms.write(req) + ms.writeMu.Unlock() + + if ms.opts.Debug { + h := getHandler(req.inHeader().Opcode) + ms.opts.Logger.Printf("Response %s: %v", h.Name, result) + } + return result } func newNotifyRequest(opcode uint32) *request { @@ -718,15 +634,7 @@ func (ms *Server) InodeNotify(node uint64, off int64, length int64) Status { entry.Off = off entry.Length = length - // Protect against concurrent close. - ms.writeMu.Lock() - result := ms.write(req) - ms.writeMu.Unlock() - - if ms.opts.Debug { - ms.opts.Logger.Println("Response: INODE_NOTIFY", result) - } - return result + return ms.notifyWrite(req) } // InodeNotifyStoreCache tells kernel to store data into inode's cache. @@ -771,15 +679,7 @@ func (ms *Server) inodeNotifyStoreCache32(node uint64, offset int64, data []byte req.outPayload = data - // Protect against concurrent close. - ms.writeMu.Lock() - result := ms.write(req) - ms.writeMu.Unlock() - - if ms.opts.Debug { - ms.opts.Logger.Printf("Response: INODE_NOTIFY_STORE_CACHE: %v", result) - } - return result + return ms.notifyWrite(req) } // InodeRetrieveCache retrieves data from kernel's inode cache. @@ -858,13 +758,7 @@ func (ms *Server) inodeRetrieveCache1(node uint64, offset int64, dest []byte) (n ms.retrieveMu.Unlock() // Protect against concurrent close. - ms.writeMu.Lock() - result := ms.write(req) - ms.writeMu.Unlock() - - if ms.opts.Debug { - ms.opts.Logger.Printf("Response: NOTIFY_RETRIEVE_CACHE: %v", result) - } + result := ms.notifyWrite(req) if result != OK { ms.retrieveMu.Lock() r := ms.retrieveTab[q.NotifyUnique] @@ -926,15 +820,7 @@ func (ms *Server) DeleteNotify(parent uint64, child uint64, name string) Status nameBytes[len(nameBytes)-1] = '\000' req.outPayload = nameBytes - // Protect against concurrent close. - ms.writeMu.Lock() - result := ms.write(req) - ms.writeMu.Unlock() - - if ms.opts.Debug { - ms.opts.Logger.Printf("Response: DELETE_NOTIFY: %v", result) - } - return result + return ms.notifyWrite(req) } // EntryNotify should be used if the existence status of an entry @@ -956,15 +842,7 @@ func (ms *Server) EntryNotify(parent uint64, name string) Status { nameBytes[len(nameBytes)-1] = '\000' req.outPayload = nameBytes - // Protect against concurrent close. - ms.writeMu.Lock() - result := ms.write(req) - ms.writeMu.Unlock() - - if ms.opts.Debug { - ms.opts.Logger.Printf("Response: ENTRY_NOTIFY: %v", result) - } - return result + return ms.notifyWrite(req) } // SupportsVersion returns true if the kernel supports the given diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/server_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/server_linux.go index fb73d48e07bd..6e3f61adb1be 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/server_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/server_linux.go @@ -10,7 +10,7 @@ import ( const useSingleReader = false -func (ms *Server) systemWrite(req *request) Status { +func (ms *Server) write(req *request) Status { if req.outPayloadSize() == 0 { err := handleEINTR(func() error { _, err := syscall.Write(ms.mountFd, req.outputBuf) diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/server_unix.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/server_unix.go index b88e38778ad9..0e960c6ad168 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/server_unix.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/server_unix.go @@ -11,7 +11,7 @@ import ( // do not error-out, meaning that unmount will hang. 
const useSingleReader = true -func (ms *Server) systemWrite(req *request) Status { +func (ms *Server) write(req *request) Status { if req.outPayloadSize() == 0 { err := handleEINTR(func() error { _, err := unix.Write(ms.mountFd, req.outputBuf) diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/splice_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/splice_linux.go index 3ada6b5a06ba..11fd8576fb5b 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/splice_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/splice_linux.go @@ -12,7 +12,7 @@ import ( ) func (s *Server) setSplice() { - s.canSplice = splice.Resizable() + s.canSplice = splice.Resizable() && !s.opts.DisableSplice } // trySplice: Zero-copy read from fdData.Fd into /dev/fuse diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/types.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/types.go index 59713504c275..29442263dcaf 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/types.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/types.go @@ -309,6 +309,7 @@ const ( CAP_PASSTHROUGH = (1 << 37) CAP_NO_EXPORT_SUPPORT = (1 << 38) CAP_HAS_RESEND = (1 << 39) + CAP_ALLOW_IDMAP = (1 << 40) ) type InitIn struct { @@ -786,3 +787,69 @@ type BackingMap struct { Flags uint32 padding uint64 } + +type SxTime struct { + Sec uint64 + Nsec uint32 + _reserved uint32 +} + +func (t *SxTime) Seconds() float64 { + return ft(t.Sec, t.Nsec) +} + +type Statx struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + + Uid uint32 + Gid uint32 + Mode uint16 + _spare0 uint16 + Ino uint64 + Size uint64 + Blocks uint64 + AttributesMask uint64 + + Atime SxTime + Btime SxTime + Ctime SxTime + Mtime SxTime + + RdevMajor uint32 + RdevMinor uint32 + DevMajor uint32 + DevMinor uint32 + _spare2 [14]uint64 +} + +type StatxIn struct { + InHeader + + GetattrFlags uint32 + _reserved uint32 + Fh uint64 + SxFlags uint32 + SxMask uint32 +} + +type StatxOut struct { + AttrValid uint64 + AttrValidNsec uint32 + Flags uint32 + _spare [2]uint64 + + Statx +} + +func (o *StatxOut) Timeout() time.Duration { + return time.Duration(uint64(o.AttrValidNsec) + o.AttrValid*1e9) +} + +func (o *StatxOut) SetTimeout(dt time.Duration) { + ns := int64(dt) + o.AttrValidNsec = uint32(ns % 1e9) + o.AttrValid = uint64(ns / 1e9) +} diff --git a/vendor/github.com/hanwen/go-fuse/v2/fuse/types_linux.go b/vendor/github.com/hanwen/go-fuse/v2/fuse/types_linux.go index 531647d7343a..40593c5d7c8e 100644 --- a/vendor/github.com/hanwen/go-fuse/v2/fuse/types_linux.go +++ b/vendor/github.com/hanwen/go-fuse/v2/fuse/types_linux.go @@ -6,6 +6,8 @@ package fuse import ( "syscall" + + "golang.org/x/sys/unix" ) const ( @@ -50,3 +52,8 @@ func (o *InitOut) setFlags(flags uint64) { o.Flags = uint32(flags) | CAP_INIT_EXT o.Flags2 = uint32(flags >> 32) } + +func (t *SxTime) FromStatxTimestamp(ts *unix.StatxTimestamp) { + t.Sec = uint64(ts.Sec) + t.Nsec = ts.Nsec +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/vendor/github.com/vbatts/tar-split/archive/tar/format.go index 1f89d0c59a15..60977980c5a8 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/format.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/format.go @@ -143,6 +143,10 @@ const ( blockSize = 512 // Size of each block in a tar stream nameSize = 100 // Max length of the name field in USTAR format prefixSize = 155 // Max length of the prefix field in USTAR format + + // Max length of a special file (PAX header, GNU long name or link). + // This matches the limit used by libarchive. 
+ maxSpecialFileSize = 1 << 20 ) // blockPadding computes the number of bytes needed to pad offset up to the diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 6a6b3e018248..248a7ccb15a7 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -144,7 +144,7 @@ func (tr *Reader) next() (*Header, error) { continue // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: format.mayOnlyBe(FormatGNU) - realname, err := io.ReadAll(tr) + realname, err := readSpecialFile(tr) if err != nil { return nil, err } @@ -338,7 +338,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { // parsePAX parses PAX headers. // If an extended header (type 'x') is invalid, ErrHeader is returned func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := io.ReadAll(r) + buf, err := readSpecialFile(r) if err != nil { return nil, err } @@ -889,6 +889,16 @@ func tryReadFull(r io.Reader, b []byte) (n int, err error) { return n, err } +// readSpecialFile is like io.ReadAll except it returns +// ErrFieldTooLong if more than maxSpecialFileSize is read. +func readSpecialFile(r io.Reader) ([]byte, error) { + buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1)) + if len(buf) > maxSpecialFileSize { + return nil, ErrFieldTooLong + } + return buf, err +} + // discard skips n bytes in r, reporting an error if unable to do so. func discard(tr *Reader, n int64) error { var seekSkipped, copySkipped int64 diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664beb5..ae8577ef366a 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d09555506f7e..dbfb2a165a06 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -127,8 +127,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4b361d0269c6..8f68dbd04aef 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,52 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. 
(#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. 
(#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + ## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 ### Added @@ -3110,7 +3156,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index bb3396557432..22a2e9dbd495 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -629,6 +629,10 @@ should be canceled. ## Approvers and Maintainers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + ### Approvers ### Maintainers @@ -641,13 +645,13 @@ should be canceled. ### Emeritus -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Evan Torrie](https://github.com/evantorrie), Yahoo -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a1228a212408..b8292a4fb910 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -260,7 +260,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6a47..ae92a4251666 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index e3db438a09f5..a6acd8dca66e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,6 +5,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" "reflect" "sync" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -472,8 +474,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) defer m.mtx.Unlock() if m.delegate != nil { - insts = unwrapInstruments(insts) - return m.delegate.RegisterCallback(f, insts...) 
+ return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) } reg := ®istration{instruments: insts, function: f} @@ -487,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -515,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} + +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -526,7 +575,7 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) 
if err != nil { GetErrorHandler().Handle(err) return diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b48f9..ac65262c6560 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -87,6 +87,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +103,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045bb4..34852a47b219 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. + Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index 544275a11467..203cd9d65080 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -5,18 +5,22 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" - "fmt" + "errors" + "os" + "strings" "sync" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/resource" ) // config contains configuration options for a MeterProvider. type config struct { - res *resource.Resource - readers []Reader - views []View + res *resource.Resource + readers []Reader + views []View + exemplarFilter exemplar.Filter } // readerSignals returns a force-flush and shutdown function for a @@ -40,25 +44,13 @@ func (c config) readerSignals() (forceFlush, shutdown func(context.Context) erro // value. func unify(funcs []func(context.Context) error) func(context.Context) error { return func(ctx context.Context) error { - var errs []error + var err error for _, f := range funcs { - if err := f(ctx); err != nil { - errs = append(errs, err) + if e := f(ctx); e != nil { + err = errors.Join(err, e) } } - return unifyErrors(errs) - } -} - -// unifyErrors combines multiple errors into a single error. -func unifyErrors(errs []error) error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return fmt.Errorf("%v", errs) + return err } } @@ -76,7 +68,13 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er // newConfig returns a config configured with options. 
func newConfig(options []Option) config { - conf := config{res: resource.Default()} + conf := config{ + res: resource.Default(), + exemplarFilter: exemplar.TraceBasedFilter, + } + for _, o := range meterProviderOptionsFromEnv() { + conf = o.apply(conf) + } for _, o := range options { conf = o.apply(conf) } @@ -140,3 +138,35 @@ func WithView(views ...View) Option { return cfg }) } + +// WithExemplarFilter configures the exemplar filter. +// +// The exemplar filter determines which measurements are offered to the +// exemplar reservoir, but the exemplar reservoir makes the final decision of +// whether to store an exemplar. +// +// By default, the [exemplar.SampledFilter] +// is used. Exemplars can be entirely disabled by providing the +// [exemplar.AlwaysOffFilter]. +func WithExemplarFilter(filter exemplar.Filter) Option { + return optionFunc(func(cfg config) config { + cfg.exemplarFilter = filter + return cfg + }) +} + +func meterProviderOptionsFromEnv() []Option { + var opts []Option + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar + const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + + switch strings.ToLower(strings.TrimSpace(os.Getenv(filterEnvKey))) { + case "always_on": + opts = append(opts, WithExemplarFilter(exemplar.AlwaysOnFilter)) + case "always_off": + opts = append(opts, WithExemplarFilter(exemplar.AlwaysOffFilter)) + case "trace_based": + opts = append(opts, WithExemplarFilter(exemplar.TraceBasedFilter)) + } + return opts +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 4beaa9ea00a5..0335b8ae48e2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -4,48 +4,49 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( - "os" "runtime" - "slices" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) -// reservoirFunc returns the appropriately configured exemplar reservoir -// creation func based on the passed InstrumentKind and user defined -// environment variables. -// -// Note: This will only return non-nil values when the experimental exemplar -// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable -// is not set to always_off. -func reservoirFunc[N int64 | float64](agg Aggregation) func() aggregate.FilteredExemplarReservoir[N] { - // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar - const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" - - var filter exemplar.Filter +// ExemplarReservoirProviderSelector selects the +// [exemplar.ReservoirProvider] to use +// based on the [Aggregation] of the metric. +type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvider - switch os.Getenv(filterEnvKey) { - case "always_on": - filter = exemplar.AlwaysOnFilter - case "always_off": - return aggregate.DropReservoir - case "trace_based": - fallthrough - default: - filter = exemplar.TraceBasedFilter +// reservoirFunc returns the appropriately configured exemplar reservoir +// creation func based on the passed InstrumentKind and filter configuration. 
+func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] { + return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] { + return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs)) } +} +// DefaultExemplarReservoirProviderSelector returns the default +// [exemplar.ReservoirProvider] for the +// provided [Aggregation]. +// +// For explicit bucket histograms with more than 1 bucket, it uses the +// [exemplar.HistogramReservoirProvider]. +// For exponential histograms, it uses the +// [exemplar.FixedSizeReservoirProvider] +// with a size of min(20, max_buckets). +// For all other aggregations, it uses the +// [exemplar.FixedSizeReservoirProvider] +// with a size equal to the number of CPUs. +// +// Exemplar default reservoirs MAY change in a minor version bump. No +// guarantees are made on the shape or statistical properties of returned +// exemplars. +func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider { // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults // Explicit bucket histogram aggregation with more than 1 bucket will // use AlignedHistogramBucketExemplarReservoir. a, ok := agg.(AggregationExplicitBucketHistogram) if ok && len(a.Boundaries) > 0 { - cp := slices.Clone(a.Boundaries) - return func() aggregate.FilteredExemplarReservoir[N] { - bounds := cp - return aggregate.NewFilteredExemplarReservoir[N](filter, exemplar.NewHistogramReservoir(bounds)) - } + return exemplar.HistogramReservoirProvider(a.Boundaries) } var n int @@ -72,7 +73,5 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() aggregate.Filtered } } - return func() aggregate.FilteredExemplarReservoir[N] { - return aggregate.NewFilteredExemplarReservoir[N](filter, exemplar.NewFixedSizeReservoir(n)) - } + return exemplar.FixedSizeReservoirProvider(n) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go index 4d485200f568..b595e2acef3d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go @@ -27,3 +27,8 @@ func TraceBasedFilter(ctx context.Context) bool { func AlwaysOnFilter(ctx context.Context) bool { return true } + +// AlwaysOffFilter is a [Filter] that never offers measurements. +func AlwaysOffFilter(ctx context.Context) bool { + return false +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go index 34160ca608bb..d4aab0aad4f8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go @@ -12,6 +12,13 @@ import ( "go.opentelemetry.io/otel/attribute" ) +// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir]. +func FixedSizeReservoirProvider(k int) ReservoirProvider { + return func(_ attribute.Set) Reservoir { + return NewFixedSizeReservoir(k) + } +} + // NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most // k exemplars. If there are k or less measurements made, the Reservoir will // sample each one. 
If there are more than k, the Reservoir will then randomly diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go index c27545a409a1..3b76cf305a42 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go @@ -12,13 +12,21 @@ import ( "go.opentelemetry.io/otel/attribute" ) +// HistogramReservoirProvider is a provider of [HistogramReservoir]. +func HistogramReservoirProvider(bounds []float64) ReservoirProvider { + cp := slices.Clone(bounds) + slices.Sort(cp) + return func(_ attribute.Set) Reservoir { + return NewHistogramReservoir(cp) + } +} + // NewHistogramReservoir returns a [HistogramReservoir] that samples the last // measurement that falls within a histogram bucket. The histogram bucket // upper-boundaries are define by bounds. // -// The passed bounds will be sorted by this function. +// The passed bounds must be sorted before calling this function. func NewHistogramReservoir(bounds []float64) *HistogramReservoir { - slices.Sort(bounds) return &HistogramReservoir{ bounds: bounds, storage: newStorage(len(bounds) + 1), diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go index 055ce5bc8ec2..ba5cd1a6b3d7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go @@ -30,3 +30,11 @@ type Reservoir interface { // The Reservoir state is preserved after this call. Collect(dest *[]Exemplar) } + +// ReservoirProvider creates new [Reservoir]s. +// +// The attributes provided are attributes which are kept by the aggregation, and +// are exclusive with attributes passed to Offer. The combination of these +// attributes and the attributes passed to Offer is the complete set of +// attributes a measurement was made with. +type ReservoirProvider func(attr attribute.Set) Reservoir diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index 2e6ac543401d..48b723a7b3b3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -144,6 +144,12 @@ type Stream struct { // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to // provide an allow-list of attribute keys here. AttributeFilter attribute.Filter + // ExemplarReservoirProvider selects the + // [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based + // on the [Aggregation]. + // + // If unspecified, [DefaultExemplarReservoirProviderSelector] is used. + ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector } // instID are the identifying properties of a instrument. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index f1f3ab673142..fde219333896 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -37,8 +37,8 @@ type Builder[N int64 | float64] struct { // create new exemplar reservoirs for a new seen attribute set. // // If this is not provided a default factory function that returns an - // DropReservoir reservoir will be used. 
- ReservoirFunc func() FilteredExemplarReservoir[N] + // dropReservoir reservoir will be used. + ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N] // AggregationLimit is the cardinality limit of measurement attributes. Any // measurement for new attributes once the limit has been reached will be // aggregated into a single aggregate for the "otel.metric.overflow" @@ -49,12 +49,12 @@ type Builder[N int64 | float64] struct { AggregationLimit int } -func (b Builder[N]) resFunc() func() FilteredExemplarReservoir[N] { +func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] { if b.ReservoirFunc != nil { return b.ReservoirFunc } - return DropReservoir + return dropReservoir } type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go index 4a3d4cc22186..8396faaa4aec 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go @@ -10,8 +10,10 @@ import ( "go.opentelemetry.io/otel/sdk/metric/exemplar" ) -// DropReservoir returns a [FilteredReservoir] that drops all measurements it is offered. -func DropReservoir[N int64 | float64]() FilteredExemplarReservoir[N] { return &dropRes[N]{} } +// dropReservoir returns a [FilteredReservoir] that drops all measurements it is offered. +func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N] { + return &dropRes[N]{} +} type dropRes[N int64 | float64] struct{} @@ -20,5 +22,6 @@ func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} // Collect resets dest. No exemplars will ever be returned. func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) { + clear(*dest) // Erase elements to let GC collect objects *dest = (*dest)[:0] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go index dcb899d62677..25d709948e9c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -17,6 +17,7 @@ var exemplarPool = sync.Pool{ func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { dest := exemplarPool.Get().(*[]exemplar.Exemplar) defer func() { + clear(*dest) // Erase elements to let GC collect objects. *dest = (*dest)[:0] exemplarPool.Put(dest) }() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index a4de5674ba19..b7aa721651e3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -283,7 +283,7 @@ func (b *expoBuckets) downscale(delta int32) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. 
-func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, @@ -306,7 +306,7 @@ type expoHistogram[N int64 | float64] struct { maxSize int maxScale int32 - newRes func() FilteredExemplarReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -327,7 +327,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib v, ok := e.values[attr.Equivalent()] if !ok { v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) - v.res = e.newRes() + v.res = e.newRes(attr) e.values[attr.Equivalent()] = v } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 35d020378bd9..d577ae2c198f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -47,13 +47,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 - newRes func() FilteredExemplarReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[*buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histValues[N] { +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -93,7 +93,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) b = newBuckets[N](attr, len(s.bounds)+1) - b.res = s.newRes() + b.res = s.newRes(attr) // Ensure min and max are recorded values (not zero), for new buckets. b.min, b.max = value, value @@ -108,7 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. 
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histogram[N] { +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index a7b5fe572bef..d3a93f085c94 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -19,7 +19,7 @@ type datapoint[N int64 | float64] struct { res FilteredExemplarReservoir[N] } -func newLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *lastValue[N] { +func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] { return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), @@ -32,7 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservo type lastValue[N int64 | float64] struct { sync.Mutex - newRes func() FilteredExemplarReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] start time.Time @@ -45,7 +45,7 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. attr := s.limit.Attributes(fltrAttr, s.values) d, ok := s.values[attr.Equivalent()] if !ok { - d.res = s.newRes() + d.res = s.newRes(attr) } d.attrs = attr @@ -114,7 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in // newPrecomputedLastValue returns an aggregator that summarizes a set of // observations as the last one made. -func newPrecomputedLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *precomputedLastValue[N] { +func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] { return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index c3b591c37c00..8e132ad6181b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -21,12 +21,12 @@ type sumValue[N int64 | float64] struct { // valueMap is the storage for sums. 
type valueMap[N int64 | float64] struct { sync.Mutex - newRes func() FilteredExemplarReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[sumValue[N]] values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *valueMap[N] { +func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { return &valueMap[N]{ newRes: r, limit: newLimiter[sumValue[N]](limit), @@ -41,7 +41,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S attr := s.limit.Attributes(fltrAttr, s.values) v, ok := s.values[attr.Equivalent()] if !ok { - v.res = s.newRes() + v.res = s.newRes(attr) } v.attrs = attr @@ -54,7 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *sum[N] { +func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { return &sum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -143,7 +143,7 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { // newPrecomputedSum returns an aggregator that summarizes a set of // observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. -func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *precomputedSum[N] { +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] { return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index e0fd86ca78da..c495985bc28c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -113,18 +113,17 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr if err != nil { return err } - var errs []error for _, producer := range mr.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) + externalMetrics, e := producer.Produce(ctx) + if e != nil { + err = errors.Join(err, e) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("ManualReader collection", "Data", rm) - return unifyErrors(errs) + return err } // MarshalLog returns logging data about the ManualReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index abff4650e1a8..823cdf2c62f3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -150,6 +150,11 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6 continue } inst.appendMeasures(in) + + // Add the measures to the pipeline. It is required to maintain + // measures per pipeline to avoid calling the measure that + // is not part of the pipeline. 
+ insert.pipeline.addInt64Measure(inst.observableID, in) for _, cback := range callbacks { inst := int64Observer{measures: in} fn := cback @@ -309,6 +314,11 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl continue } inst.appendMeasures(in) + + // Add the measures to the pipeline. It is required to maintain + // measures per pipeline to avoid calling the measure that + // is not part of the pipeline. + insert.pipeline.addFloat64Measure(inst.observableID, in) for _, cback := range callbacks { inst := float64Observer{measures: in} fn := cback @@ -441,68 +451,75 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return noopRegister{}, nil } - reg := newObserver() - var errs multierror + var err error + validInstruments := make([]metric.Observable, 0, len(insts)) for _, inst := range insts { - // Unwrap any global. - if u, ok := inst.(interface { - Unwrap() metric.Observable - }); ok { - inst = u.Unwrap() - } - switch o := inst.(type) { case int64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) + if e := o.registerable(m); e != nil { + if !errors.Is(e, errEmptyAgg) { + err = errors.Join(err, e) } continue } - reg.registerInt64(o.observableID) + + validInstruments = append(validInstruments, inst) case float64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) + if e := o.registerable(m); e != nil { + if !errors.Is(e, errEmptyAgg) { + err = errors.Join(err, e) } continue } - reg.registerFloat64(o.observableID) + + validInstruments = append(validInstruments, inst) default: // Instrument external to the SDK. return nil, fmt.Errorf("invalid observable: from different implementation") } } - err := errs.errorOrNil() - if reg.len() == 0 { + if len(validInstruments) == 0 { // All insts use drop aggregation or are invalid. return noopRegister{}, err } - // Some or all instruments were valid. - cback := func(ctx context.Context) error { return f(ctx, reg) } - return m.pipes.registerMultiCallback(cback), err + unregs := make([]func(), len(m.pipes)) + for ix, pipe := range m.pipes { + reg := newObserver(pipe) + for _, inst := range validInstruments { + switch o := inst.(type) { + case int64Observable: + reg.registerInt64(o.observableID) + case float64Observable: + reg.registerFloat64(o.observableID) + } + } + + // Some or all instruments were valid. + cBack := func(ctx context.Context) error { return f(ctx, reg) } + unregs[ix] = pipe.addMultiCallback(cBack) + } + + return unregisterFuncs{f: unregs}, err } type observer struct { embedded.Observer + pipe *pipeline float64 map[observableID[float64]]struct{} int64 map[observableID[int64]]struct{} } -func newObserver() observer { +func newObserver(p *pipeline) observer { return observer{ + pipe: p, float64: make(map[observableID[float64]]struct{}), int64: make(map[observableID[int64]]struct{}), } } -func (r observer) len() int { - return len(r.float64) + len(r.int64) -} - func (r observer) registerFloat64(id observableID[float64]) { r.float64[id] = struct{}{} } @@ -521,16 +538,6 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... switch conv := o.(type) { case float64Observable: oImpl = conv - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. 
- async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(float64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } default: global.Error(errUnknownObserver, "failed to record") return @@ -548,7 +555,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... return } c := metric.NewObserveConfig(opts) - oImpl.observe(v, c.Attributes()) + // Access to r.pipe.float64Measure is already guarded by a lock in pipeline.produce. + // TODO (#5946): Refactor pipeline and observable measures. + measures := r.pipe.float64Measures[oImpl.observableID] + for _, m := range measures { + m(context.Background(), v, c.Attributes()) + } } func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { @@ -556,16 +568,6 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric switch conv := o.(type) { case int64Observable: oImpl = conv - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. - async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(int64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } default: global.Error(errUnknownObserver, "failed to record") return @@ -583,7 +585,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric return } c := metric.NewObserveConfig(opts) - oImpl.observe(v, c.Attributes()) + // Access to r.pipe.int64Measures is already guarded b a lock in pipeline.produce. + // TODO (#5946): Refactor pipeline and observable measures. + measures := r.pipe.int64Measures[oImpl.observableID] + for _, m := range measures { + m(context.Background(), v, c.Attributes()) + } } type noopRegister struct{ embedded.Registration } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 67ee1b11a2e5..dcd2182d9a15 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -251,18 +251,17 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd if err != nil { return err } - var errs []error for _, producer := range r.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) + externalMetrics, e := producer.Produce(ctx) + if e != nil { + err = errors.Join(err, e) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("PeriodicReader collection", "Data", rm) - return unifyErrors(errs) + return err } // export exports metric data m using r's exporter. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 823bf2fe3d27..775e2452619a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -8,14 +8,13 @@ import ( "context" "errors" "fmt" - "strings" "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/internal/x" @@ -38,14 +37,17 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } -func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { +func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { if res == nil { res = resource.Empty() } return &pipeline{ - resource: res, - reader: reader, - views: views, + resource: res, + reader: reader, + views: views, + int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, + float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, + exemplarFilter: exemplarFilter, // aggregations is lazy allocated when needed. } } @@ -63,9 +65,26 @@ type pipeline struct { views []View sync.Mutex - aggregations map[instrumentation.Scope][]instrumentSync - callbacks []func(context.Context) error - multiCallbacks list.List + int64Measures map[observableID[int64]][]aggregate.Measure[int64] + float64Measures map[observableID[float64]][]aggregate.Measure[float64] + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List + exemplarFilter exemplar.Filter +} + +// addInt64Measure adds a new int64 measure to the pipeline for each observer. +func (p *pipeline) addInt64Measure(id observableID[int64], m []aggregate.Measure[int64]) { + p.Lock() + defer p.Unlock() + p.int64Measures[id] = m +} + +// addFloat64Measure adds a new float64 measure to the pipeline for each observer. +func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Measure[float64]) { + p.Lock() + defer p.Unlock() + p.float64Measures[id] = m } // addSync adds the instrumentSync to pipeline p with scope. This method is not @@ -105,14 +124,15 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) p.Lock() defer p.Unlock() - var errs multierror + var err error for _, c := range p.callbacks { // TODO make the callbacks parallel. ( #3034 ) - if err := c(ctx); err != nil { - errs.append(err) + if e := c(ctx); e != nil { + err = errors.Join(err, e) } if err := ctx.Err(); err != nil { rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -120,12 +140,13 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. ( #3034 ) f := e.Value.(multiCallback) - if err := f(ctx); err != nil { - errs.append(err) + if e := f(ctx); e != nil { + err = errors.Join(err, e) } if err := ctx.Err(); err != nil { // This means the context expired before we finished running callbacks. rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. 
rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -157,7 +178,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) rm.ScopeMetrics = rm.ScopeMetrics[:i] - return errs.errorOrNil() + return err } // inserter facilitates inserting of new instruments from a single scope into a @@ -219,7 +240,7 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures []aggregate.Measure[N] ) - errs := &multierror{wrapped: errCreatingAggregators} + var err error seen := make(map[uint64]struct{}) for _, v := range i.pipeline.views { stream, match := v(inst) @@ -227,9 +248,9 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) continue } matched = true - in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) + in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if e != nil { + err = errors.Join(err, e) } if in == nil { // Drop aggregation. continue @@ -242,8 +263,12 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures = append(measures, in) } + if err != nil { + err = errors.Join(errCreatingAggregators, err) + } + if matched { - return measures, errs.errorOrNil() + return measures, err } // Apply implicit default view if no explicit matched. @@ -252,15 +277,18 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) Description: inst.Description, Unit: inst.Unit, } - in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) + in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if e != nil { + if err == nil { + err = errCreatingAggregators + } + err = errors.Join(err, e) } if in != nil { // Ensured to have not seen given matched was false. measures = append(measures, in) } - return measures, errs.errorOrNil() + return measures, err } // addCallback registers a single instrument callback to be run when @@ -329,6 +357,9 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum // The view explicitly requested the default aggregation. stream.Aggregation = DefaultAggregationSelector(kind) } + if stream.ExemplarReservoirProviderSelector == nil { + stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector + } if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { return nil, 0, fmt.Errorf( @@ -349,7 +380,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ Temporality: i.pipeline.reader.temporality(kind), - ReservoirFunc: reservoirFunc[N](stream.Aggregation), + ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter), } b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation @@ -552,24 +583,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. 
type pipelines []*pipeline -func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { +func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { - p := newPipeline(res, r, views) + p := newPipeline(res, r, views, exemplarFilter) r.register(p) pipes = append(pipes, p) } return pipes } -func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { - unregs := make([]func(), len(p)) - for i, pipe := range p { - unregs[i] = pipe.addMultiCallback(c) - } - return unregisterFuncs{f: unregs} -} - type unregisterFuncs struct { embedded.Registration f []func() @@ -602,15 +625,15 @@ func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) reso func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] - errs := &multierror{} + var err error for _, i := range r.inserters { - in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) - if err != nil { - errs.append(err) + in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if e != nil { + err = errors.Join(err, e) } measures = append(measures, in...) } - return measures, errs.errorOrNil() + return measures, err } // HistogramAggregators returns the histogram Aggregators that must be updated by the instrument @@ -619,37 +642,18 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] - errs := &multierror{} + var err error for _, i := range r.inserters { agg := i.readerDefaultAggregation(id.Kind) if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { histAgg.Boundaries = boundaries agg = histAgg } - in, err := i.Instrument(id, agg) - if err != nil { - errs.append(err) + in, e := i.Instrument(id, agg) + if e != nil { + err = errors.Join(err, e) } measures = append(measures, in...) } - return measures, errs.errorOrNil() -} - -type multierror struct { - wrapped error - errors []string -} - -func (m *multierror) errorOrNil() error { - if len(m.errors) == 0 { - return nil - } - if m.wrapped == nil { - return errors.New(strings.Join(m.errors, "; ")) - } - return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) -} - -func (m *multierror) append(err error) { - m.errors = append(m.errors, err.Error()) + return measures, err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go index a82af538e67c..2fca89e5a8e5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ - pipes: newPipelines(conf.res, conf.readers, conf.views), + pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), forceFlush: flush, shutdown: sdown, } @@ -76,15 +76,17 @@ func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metri c := metric.NewMeterConfig(options...) 
s := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } global.Info("Meter created", "Name", s.Name, "Version", s.Version, "SchemaURL", s.SchemaURL, + "Attributes", s.Attributes, ) return mp.meters.Lookup(s, func() *meter { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index fa43f8469c19..6347060bf414 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.31.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go index cd08c673248a..630890f42631 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -96,11 +96,12 @@ func NewView(criteria Instrument, mask Stream) View { return func(i Instrument) (Stream, bool) { if matchFunc(i) { return Stream{ - Name: nonZero(mask.Name, i.Name), - Description: nonZero(mask.Description, i.Description), - Unit: nonZero(mask.Unit, i.Unit), - Aggregation: agg, - AttributeFilter: mask.AttributeFilter, + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, + ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector, }, true } return Stream{}, false diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d49c..c02aeefdde53 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. 
- res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7b45..cf3c88e15cd6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 4ce757dfd6b1..ccc97e1b6625 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. + clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5bebda0..185aa7c08f7c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. 
- global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 730fb85c3ef6..17f883c2c86f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -639,10 +639,7 @@ func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index dc1eaa8e9d06..0b214d3fe9f4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.31.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 6d3c7b1f40ec..59e24816137f 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.31.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index cdebdb5eb78a..c04b12f6b74c 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.31.0 + version: v1.32.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,12 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.53.0 + version: v0.54.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.7.0 + version: v0.8.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.10 + version: v0.0.11 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 2fc0a71f9441..76fa5fea95f2 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -54,9 +54,18 @@ func init() { balancer.Register(pickfirstBuilder{}) } -// enableHealthListenerKeyType is a unique key type used in resolver attributes -// to indicate whether the health listener usage is enabled. -type enableHealthListenerKeyType struct{} +type ( + // enableHealthListenerKeyType is a unique key type used in resolver + // attributes to indicate whether the health listener usage is enabled. + enableHealthListenerKeyType struct{} + // managedByPickfirstKeyType is an attribute key type to inform Outlier + // Detection that the generic health listener is being used. + // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when + // implementing the dualstack design. This is a hack. Once Dualstack is + // completed, outlier detection will stop sending ejection updates through + // the connectivity listener. + managedByPickfirstKeyType struct{} +) var ( logger = grpclog.Component("pick-first-leaf-lb") @@ -140,6 +149,17 @@ func EnableHealthListener(state resolver.State) resolver.State { return state } +// IsManagedByPickfirst returns whether an address belongs to a SubConn +// managed by the pickfirst LB policy. +// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable +// outlier_detection via the with connectivity listener when using pick_first. 
+// Once Dualstack changes are complete, all SubConns will be created by +// pick_first and outlier detection will only use the health listener for +// ejection. This hack can then be removed. +func IsManagedByPickfirst(addr resolver.Address) bool { + return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -166,6 +186,7 @@ type scData struct { } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) sd := &scData{ rawConnectivityState: connectivity.Idle, effectiveState: connectivity.Idle, diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 905817b5fc7b..c2688376ae74 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -34,7 +34,15 @@ import ( "google.golang.org/grpc/status" ) -var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) +var ( + setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // noOpRegisterHealthListenerFn is used when client side health checking is + // disabled. It sends a single READY update on the registered listener. + noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() { + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + return func() {} + } +) // ccBalancerWrapper sits between the ClientConn and the Balancer. // @@ -277,10 +285,17 @@ type healthData struct { // to the LB policy. This is stored to avoid sending updates when the // SubConn has already exited connectivity state READY. connectivityState connectivity.State + // closeHealthProducer stores function to close the ref counted health + // producer. The health producer is automatically closed when the SubConn + // state changes. + closeHealthProducer func() } func newHealthData(s connectivity.State) *healthData { - return &healthData{connectivityState: s} + return &healthData{ + connectivityState: s, + closeHealthProducer: func() {}, + } } // updateState is invoked by grpc to push a subConn state update to the @@ -413,6 +428,37 @@ func (acbw *acBalancerWrapper) closeProducers() { } } +// healthProducerRegisterFn is a type alias for the health producer's function +// for registering listeners. +type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func() + +// healthListenerRegFn returns a function to register a listener for health +// updates. If client side health checks are disabled, the registered listener +// will get a single READY (raw connectivity state) update. +// +// Client side health checking is enabled when all the following +// conditions are satisfied: +// 1. Health checking is not disabled using the dial option. +// 2. The health package is imported. +// 3. The health check config is present in the service config. +func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() { + if acbw.ccb.cc.dopts.disableHealthCheck { + return noOpRegisterHealthListenerFn + } + regHealthLisFn := internal.RegisterClientHealthCheckListener + if regHealthLisFn == nil { + // The health package is not imported. 
+ return noOpRegisterHealthListenerFn + } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } + return func(ctx context.Context, listener func(balancer.SubConnState)) func() { + return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener) + } +} + // RegisterHealthListener accepts a health listener from the LB policy. It sends // updates to the health listener as long as the SubConn's connectivity state // doesn't change and a new health listener is not registered. To invalidate @@ -421,6 +467,7 @@ func (acbw *acBalancerWrapper) closeProducers() { func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { acbw.healthMu.Lock() defer acbw.healthMu.Unlock() + acbw.healthData.closeHealthProducer() // listeners should not be registered when the connectivity state // isn't Ready. This may happen when the balancer registers a listener // after the connectivityState is updated, but before it is notified @@ -436,6 +483,7 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub return } + registerFn := acbw.healthListenerRegFn() acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return @@ -443,10 +491,25 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub // Don't send updates if a new listener is registered. acbw.healthMu.Lock() defer acbw.healthMu.Unlock() - curHD := acbw.healthData - if curHD != hd { + if acbw.healthData != hd { return } - listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Serialize the health updates from the health producer with + // other calls into the LB policy. + listenerWrapper := func(scs balancer.SubConnState) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + listener(scs) + }) + } + + hd.closeHealthProducer = registerFn(ctx, listenerWrapper) }) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 9e9d0806995c..21dd72969aee 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index e163a473df93..bd5fe22b6af6 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -32,6 +32,8 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434" + var logger = grpclog.Component("credentials") // TLSInfo contains the auth information for a TLS authenticated connection. 
@@ -128,7 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if np == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) } @@ -158,7 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if cs.NegotiatedProtocol == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } else if logger.V(2) { logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7494ae591f16..f3a045296a46 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -428,6 +428,11 @@ func WithTimeout(d time.Duration) DialOption { // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // +// Note that gRPC by default performs name resolution on the target passed to +// NewClient. To bypass name resolution and cause the target string to be +// passed directly to the dialer here instead, use the "passthrough" resolver +// by specifying it in the target string, e.g. "passthrough:target". +// // Note: All supported releases of Go (as of December 2023) override the OS // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive // with OS defaults for keepalive time and interval, use a net.Dialer that sets diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 26e16d91924f..467de16bdbcd 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/producer.go b/vendor/google.golang.org/grpc/health/producer.go new file mode 100644 index 000000000000..f938e5790c7b --- /dev/null +++ b/vendor/google.golang.org/grpc/health/producer.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/status" +) + +func init() { + producerBuilderSingleton = &producerBuilder{} + internal.RegisterClientHealthCheckListener = registerClientSideHealthCheckListener +} + +type producerBuilder struct{} + +var producerBuilderSingleton *producerBuilder + +// Build constructs and returns a producer and its cleanup function. +func (*producerBuilder) Build(cci any) (balancer.Producer, func()) { + p := &healthServiceProducer{ + cc: cci.(grpc.ClientConnInterface), + cancel: func() {}, + } + return p, func() { + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + } +} + +type healthServiceProducer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + cc grpc.ClientConnInterface + + mu sync.Mutex + cancel func() +} + +// registerClientSideHealthCheckListener accepts a listener to provide server +// health state via the health service. +func registerClientSideHealthCheckListener(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() { + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*healthServiceProducer) + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + if listener == nil { + return closeFn + } + + ctx, cancel := context.WithCancel(ctx) + p.cancel = cancel + + go p.startHealthCheck(ctx, sc, serviceName, listener) + return closeFn +} + +func (p *healthServiceProducer) startHealthCheck(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) { + newStream := func(method string) (any, error) { + return p.cc.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, method) + } + + setConnectivityState := func(state connectivity.State, err error) { + listener(balancer.SubConnState{ + ConnectivityState: state, + ConnectionError: err, + }) + } + + // Call the function through the internal variable as tests use it for + // mocking. + err := internal.HealthCheckFunc(ctx, newStream, setConnectivityState, serviceName) + if err == nil { + return + } + if status.Code(err) == codes.Unimplemented { + logger.Errorf("Subchannel health check is unimplemented at server side, thus health check is disabled for SubConn %p", sc) + } else { + logger.Errorf("Health checking failed for SubConn %p: %v", sc, err) + } +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6e7dd6b77270..1e42b6fdc872 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -49,7 +49,7 @@ var ( // XDSFallbackSupport is the env variable that controls whether support for // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used // instead of the exiting pickfirst implementation. 
This can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 29f234acb1b9..9afeb444d453 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -53,4 +53,10 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + + // XDSDualstackEndpointsEnabled is true if gRPC should read the + // "additional addresses" in the xDS endpoint resource. + // TODO: https://github.com/grpc/grpc-go/issues/7866 - Control this using + // an env variable when all LB policies handle endpoints. + XDSDualstackEndpointsEnabled = false ) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 3afc1813440e..c17b98194b3c 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -31,6 +31,10 @@ import ( var ( // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker + // RegisterClientHealthCheckListener is used to provide a listener for + // updates from the client-side health checking service. It returns a + // function that can be called to stop the health producer. + RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. 
This must be 10s by diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index d9305a65d88f..3dea23573518 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -498,5 +498,5 @@ func mapRecvMsgError(err error) error { if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } - return connectionErrorf(true, err, err.Error()) + return connectionErrorf(true, err, "%s", err.Error()) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0055fddd7ecf..997b0a59b586 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -564,7 +564,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, + httpStatus: http.StatusMethodNotAllowed, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -585,7 +585,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, + httpStatus: http.StatusOK, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index 58019722d01b..30cb61c65e19 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 4d7b654ef544..3747f6d5334f 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 16065a027ae8..9d5b2884d14e 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1360,8 +1360,16 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } return err } - defer d.Free() + freed := false + dataFree := func() { + if !freed { + d.Free() + freed = true + } + } + defer dataFree() df := func(v any) error { + defer dataFree() if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 7e83027d1994..8d451e07c7cc 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -268,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } +func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { + return jrp.MaxAttempts > 1 && + jrp.InitialBackoff > 0 && + jrp.MaxBackoff > 0 && + jrp.BackoffMultiplier > 0 && + len(jrp.RetryableStatusCodes) > 0 +} + func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil + if !isValidRetryPolicy(jrp) { + return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) } if jrp.MaxAttempts < maxAttempts { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 17e2267b3320..54adbbced7a6 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -1766,7 +1766,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { return err } if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index d2bba7f3d9ec..0e03fa4d4f7e 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.69.4" +const Version = "1.70.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index ac5052f07022..34b90ee7e026 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -414,7 +414,7 @@ github.com/containerd/platforms # github.com/containerd/plugin v1.0.0 ## explicit; go 1.20 github.com/containerd/plugin -# github.com/containerd/stargz-snapshotter v0.16.3 +# github.com/containerd/stargz-snapshotter v0.16.2-0.20250126124854-1281fc2cd2ea ## explicit; go 1.22.0 github.com/containerd/stargz-snapshotter/cache github.com/containerd/stargz-snapshotter/fs @@ -432,7 +432,7 @@ github.com/containerd/stargz-snapshotter/task github.com/containerd/stargz-snapshotter/util/cacheutil github.com/containerd/stargz-snapshotter/util/namedmutex github.com/containerd/stargz-snapshotter/util/testutil -# github.com/containerd/stargz-snapshotter/estargz v0.16.3 +# github.com/containerd/stargz-snapshotter/estargz v0.16.2-0.20250126124854-1281fc2cd2ea ## explicit; go 1.22.0 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil @@ -586,7 +586,7 @@ github.com/google/uuid github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/hanwen/go-fuse/v2 v2.6.3 +# github.com/hanwen/go-fuse/v2 v2.7.2 ## explicit; go 1.17 github.com/hanwen/go-fuse/v2/fs github.com/hanwen/go-fuse/v2/fuse @@ -816,7 +816,7 @@ github.com/tonistiigi/vt100 # github.com/urfave/cli v1.22.16 ## explicit; go 1.11 github.com/urfave/cli -# github.com/vbatts/tar-split v0.11.6 +# github.com/vbatts/tar-split v0.11.7 ## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar # github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 @@ -850,7 +850,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.31.0 +# go.opentelemetry.io/otel v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -909,12 +909,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry # go.opentelemetry.io/otel/exporters/prometheus v0.42.0 ## explicit; go 1.20 go.opentelemetry.io/otel/exporters/prometheus -# go.opentelemetry.io/otel/metric v1.31.0 +# go.opentelemetry.io/otel/metric v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.31.0 +# go.opentelemetry.io/otel/sdk v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -923,7 +923,7 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/metric v1.31.0 +# go.opentelemetry.io/otel/sdk/metric v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/exemplar @@ -931,7 +931,7 @@ go.opentelemetry.io/otel/sdk/metric/internal go.opentelemetry.io/otel/sdk/metric/internal/aggregate go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.31.0 +# 
go.opentelemetry.io/otel/trace v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -997,14 +997,14 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.6.0 ## explicit; go 1.18 golang.org/x/time/rate -# google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 +# google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a ## explicit; go 1.21 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.69.4 +# google.golang.org/grpc v1.70.0 ## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes From 1543318cb4fde14ae4d9fb43640faaf40828adb0 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 15 Jan 2025 13:31:05 +0100 Subject: [PATCH 4/5] vendor: nydus-snapshotter Signed-off-by: Sebastiaan van Stijn --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index a266840187bd..db622ee75949 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/containerd/go-cni v1.1.12 github.com/containerd/go-runc v1.1.0 github.com/containerd/log v0.1.0 - github.com/containerd/nydus-snapshotter v0.15.0 + github.com/containerd/nydus-snapshotter v0.15.1-0.20250126020009-9089ad11ad05 github.com/containerd/platforms v1.0.0-rc.1 github.com/containerd/stargz-snapshotter v0.16.2-0.20250126124854-1281fc2cd2ea github.com/containerd/stargz-snapshotter/estargz v0.16.2-0.20250126124854-1281fc2cd2ea diff --git a/go.sum b/go.sum index d4ae0e2e8030..eee0d1d8cdc5 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,8 @@ github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gG github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/nydus-snapshotter v0.15.0 h1:RqZRs1GPeM6T3wmuxJV9u+2Rg4YETVMwTmiDeX+iWC8= -github.com/containerd/nydus-snapshotter v0.15.0/go.mod h1:biq0ijpeZe0I5yZFSJyHzFSjjRZQ7P7y/OuHyd7hYOw= +github.com/containerd/nydus-snapshotter v0.15.1-0.20250126020009-9089ad11ad05 h1:r1OXtbysi4+3nyGY2vOm4UCaRe3QXDYLTlIixCdXQqE= +github.com/containerd/nydus-snapshotter v0.15.1-0.20250126020009-9089ad11ad05/go.mod h1:FfwH2KBkNYoisK/e+KsmNr7xTU53DmnavQHMFOcXwfM= github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y= diff --git a/vendor/modules.txt b/vendor/modules.txt index 34b90ee7e026..ee72e53f96fe 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -403,7 +403,7 @@ github.com/containerd/go-runc # github.com/containerd/log v0.1.0 ## explicit; go 1.20 github.com/containerd/log -# github.com/containerd/nydus-snapshotter v0.15.0 +# github.com/containerd/nydus-snapshotter v0.15.1-0.20250126020009-9089ad11ad05 ## explicit; go 1.22.0 github.com/containerd/nydus-snapshotter/pkg/converter github.com/containerd/nydus-snapshotter/pkg/converter/tool 
From a2b6a112ab878040754b50d0a338106d137d86e1 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 15 Jan 2025 13:33:32 +0100 Subject: [PATCH 5/5] remove explicit downgrade Signed-off-by: Sebastiaan van Stijn --- go.mod | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/go.mod b/go.mod index db622ee75949..ebd3998cc450 100644 --- a/go.mod +++ b/go.mod @@ -186,20 +186,3 @@ require ( tags.cncf.io/container-device-interface v0.8.0 // indirect tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect ) - -exclude ( - // TODO(thaJeztah): remove once fuse-overlayfs-snapshotter, nydus-snapshotter, and stargz-snapshotter updated to containerd v2.0.2 and downgraded these dependencies. - // - // These dependencies were updated to "master" in some modules we depend on, - // but have no code-changes since their last release. Unfortunately, this also - // causes a ripple effect, forcing all users of the containerd module to also - // update these dependencies to an unrelease / un-tagged version. - // - // Both these dependencies will unlikely do a new release in the near future, - // so exclude these versions so that we can downgrade to the current release. - // - // For additional details, see this PR and links mentioned in that PR: - // https://github.com/kubernetes-sigs/kustomize/pull/5830#issuecomment-2569960859 - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 -)