From 34f6310e5c28aab7d74a46c96808c406c12182b4 Mon Sep 17 00:00:00 2001 From: morvencao Date: Mon, 6 May 2024 09:04:40 +0000 Subject: [PATCH] add GC for cloudevents work client. Signed-off-by: morvencao --- go.mod | 19 +- go.sum | 42 +- pkg/addonmanager/cloudevents/manager.go | 32 + .../x/crypto/internal/poly1305/sum_ppc64le.s | 14 +- vendor/golang.org/x/net/http2/frame.go | 31 + vendor/golang.org/x/net/http2/pipe.go | 11 +- vendor/golang.org/x/net/http2/server.go | 13 +- vendor/golang.org/x/net/http2/testsync.go | 331 ++++++++ vendor/golang.org/x/net/http2/transport.go | 307 ++++++-- vendor/golang.org/x/net/websocket/client.go | 55 +- vendor/golang.org/x/net/websocket/dial.go | 11 +- vendor/golang.org/x/sys/unix/aliases.go | 2 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 99 +++ .../golang.org/x/sys/unix/zsyscall_linux.go | 10 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 60 ++ .../pkg/admission/plugin/cel/composition.go | 2 +- .../controller_reconcile.go | 5 +- .../apiserver/pkg/cel/environment/base.go | 16 +- .../metadata/metadatainformer/informer.go | 215 ++++++ .../metadata/metadatainformer/interface.go | 53 ++ .../metadata/metadatalister/interface.go | 40 + .../metadata/metadatalister/lister.go | 91 +++ .../client-go/metadata/metadatalister/shim.go | 87 +++ .../metrics/prometheus/slis/metrics.go | 1 + vendor/k8s.io/controller-manager/LICENSE | 201 +++++ .../pkg/informerfactory/informer_factory.go | 56 ++ .../kube-openapi/pkg/builder3/openapi.go | 3 + .../k8s.io/kube-openapi/pkg/common/common.go | 3 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 3 - vendor/modules.txt | 27 +- .../pkg/cloudevents/generic/baseclient.go | 4 +- .../work/garbagecollector/garbagecollector.go | 709 ++++++++++++++++++ .../work/garbagecollector/operations.go | 264 +++++++ 35 files changed, 2680 insertions(+), 151 deletions(-) create mode 100644 vendor/golang.org/x/net/http2/testsync.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatainformer/informer.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatainformer/interface.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatalister/interface.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatalister/lister.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatalister/shim.go create mode 100644 vendor/k8s.io/controller-manager/LICENSE create mode 100644 vendor/k8s.io/controller-manager/pkg/informerfactory/informer_factory.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/garbagecollector.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/operations.go diff --git a/go.mod b/go.mod index 6bab039c6..deac463e7 100644 --- a/go.mod +++ b/go.mod @@ -18,9 +18,9 @@ require ( k8s.io/api v0.29.2 k8s.io/apiextensions-apiserver v0.29.0 k8s.io/apimachinery v0.29.2 - k8s.io/apiserver v0.29.0 + k8s.io/apiserver v0.29.2 k8s.io/client-go v0.29.2 - k8s.io/component-base v0.29.1 + k8s.io/component-base v0.29.2 k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 open-cluster-management.io/api v0.13.0 @@ -109,13 +109,13 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect - 
golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.8 // indirect @@ -128,10 +128,13 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kms v0.29.0 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/controller-manager v0.29.2 // indirect + k8s.io/kms v0.29.2 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) + +replace open-cluster-management.io/sdk-go => github.com/morvencao/ocm-sdk-go v0.0.0-20240506074627-941a4e4cfefb diff --git a/go.sum b/go.sum index 8235a81be..725f107de 100644 --- a/go.sum +++ b/go.sum @@ -180,6 +180,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morvencao/ocm-sdk-go v0.0.0-20240506074627-941a4e4cfefb h1:3D3BBH7obOq0AgLqZzbOADJFUsNyFBj5/zQE7a2EH30= +github.com/morvencao/ocm-sdk-go v0.0.0-20240506074627-941a4e4cfefb/go.mod h1:vuhp/e3Y4utj8h+BBt+AauZHDE5zWvwIxprgIqQN7f4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -305,8 +307,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -321,8 +323,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -347,13 +349,13 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -369,8 +371,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -425,24 +427,24 @@ k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3 k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= k8s.io/apimachinery v0.29.2 
h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= -k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= +k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= +k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= -k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= +k8s.io/controller-manager v0.29.2 h1:S99UKzjvyFWG4WZWaWQ+iu64X9axwzbi4152tFd73+4= +k8s.io/controller-manager v0.29.2/go.mod h1:xghbiyv5l/SVA5yVvRuGDmNVJEGl7MQqPAD0hvjZLhM= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI= -k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y= +k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= -open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 h1:zFmHuW+ztdfUUNslqNW+H1WEcfdEUQHoRDbmdajX340= -open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090/go.mod h1:w2OaxtCyegxeyFLU42UQ3oxUz01QdsBQkcHI17T/l48= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= diff --git a/pkg/addonmanager/cloudevents/manager.go b/pkg/addonmanager/cloudevents/manager.go index 6d2b71414..506cd4a6d 100644 --- a/pkg/addonmanager/cloudevents/manager.go +++ b/pkg/addonmanager/cloudevents/manager.go @@ -4,13 +4,20 @@ import ( "context" "time" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + cacheddiscovery "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/metadata" + 
"k8s.io/client-go/metadata/metadatainformer" "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" "k8s.io/client-go/tools/cache" + "k8s.io/controller-manager/pkg/informerfactory" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" @@ -21,6 +28,7 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager" "open-cluster-management.io/addon-framework/pkg/index" cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec" ) @@ -83,6 +91,11 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { return err } + metadataClient, err := metadata.NewForConfig(config) + if err != nil { + return err + } + dynamicClient, err := dynamic.NewForConfig(config) if err != nil { return err @@ -152,6 +165,25 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { return err } + cachedClient := cacheddiscovery.NewMemCacheClient(kubeClient) + restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedClient) + go wait.Until(func() { + restMapper.Reset() + }, 30*time.Second, ctx.Done()) + + // Informer transform to trim ManagedFields for memory efficiency. + trim := func(obj interface{}) (interface{}, error) { + if accessor, err := meta.Accessor(obj); err == nil { + accessor.SetManagedFields(nil) + } + return obj, nil + } + sharedInformers := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, kubeinformers.WithTransform(trim)) + metadataInformers := metadatainformer.NewSharedInformerFactoryWithOptions(metadataClient, 10*time.Minute, metadatainformer.WithTransform(trim)) + informerFactory := informerfactory.NewInformerFactory(sharedInformers, metadataInformers) + garbageCollector := garbagecollector.NewGarbageCollector(workClient.WorkV1(), restMapper, workInformers, informerFactory) + go garbageCollector.Run(ctx, 1) + err = a.StartWithInformers(ctx, workClient, workInformers, kubeInformers, addonInformers, clusterInformers, dynamicInformers) if err != nil { return err diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5deeb..b3c1699bf 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/net/http2/frame.go 
b/vendor/golang.org/x/net/http2/frame.go index e2b298d85..43557ab7e 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1564,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984fd..3b9f06b96 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c6408..ce2e8b40e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. 
IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 000000000..61075bd16 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. 
+ // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. + blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// broadcast is sync.Cond.Broadcast. +func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// broadcast is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. 
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. +type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. +type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c..ce375c8c7 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. 
StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. +func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
- cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) 
(*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy 
of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
- cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2911,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2955,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go index 69a4ac7ee..1e64157f3 100644 --- a/vendor/golang.org/x/net/websocket/client.go +++ b/vendor/golang.org/x/net/websocket/client.go @@ -6,10 +6,12 @@ package websocket import ( "bufio" + "context" "io" "net" "net/http" "net/url" + "time" ) // DialError is an error that occurs while dialling a websocket server. 
@@ -79,28 +81,59 @@ func parseAuthority(location *url.URL) string { // DialConfig opens a new client connection to a WebSocket with a config. func DialConfig(config *Config) (ws *Conn, err error) { - var client net.Conn + return config.DialContext(context.Background()) +} + +// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation. +func (config *Config) DialContext(ctx context.Context) (*Conn, error) { if config.Location == nil { return nil, &DialError{config, ErrBadWebSocketLocation} } if config.Origin == nil { return nil, &DialError{config, ErrBadWebSocketOrigin} } + dialer := config.Dialer if dialer == nil { dialer = &net.Dialer{} } - client, err = dialWithDialer(dialer, config) - if err != nil { - goto Error - } - ws, err = NewClient(config, client) + + client, err := dialWithDialer(ctx, dialer, config) if err != nil { - client.Close() - goto Error + return nil, &DialError{config, err} } - return -Error: - return nil, &DialError{config, err} + // Cleanup the connection if we fail to create the websocket successfully + success := false + defer func() { + if !success { + _ = client.Close() + } + }() + + var ws *Conn + var wsErr error + doneConnecting := make(chan struct{}) + go func() { + defer close(doneConnecting) + ws, err = NewClient(config, client) + if err != nil { + wsErr = &DialError{config, err} + } + }() + + // The websocket.NewClient() function can block indefinitely, make sure that we + // respect the deadlines specified by the context. + select { + case <-ctx.Done(): + // Force the pending operations to fail, terminating the pending connection attempt + _ = client.SetDeadline(time.Now()) + <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish + return nil, &DialError{config, ctx.Err()} + case <-doneConnecting: + if wsErr == nil { + success = true // Disarm the deferred connection cleanup + } + return ws, wsErr + } } diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go index 2dab943a4..8a2d83c47 100644 --- a/vendor/golang.org/x/net/websocket/dial.go +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -5,18 +5,23 @@ package websocket import ( + "context" "crypto/tls" "net" ) -func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { +func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) { switch config.Location.Scheme { case "ws": - conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) case "wss": - conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + tlsDialer := &tls.Dialer{ + NetDialer: dialer, + Config: config.TlsConfig, + } + conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) default: err = ErrBadScheme } diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4bd..b0e419857 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc69937..2f0fa76e4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4db..2b57e0f73 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e6..5682e2628 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. 
+func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. 
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d2712..87d8612a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index dc0c955ee..eff6bcdef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -836,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1550,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1565,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1612,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1649,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1670,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1693,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO 
= 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1726,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1740,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1752,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1781,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1796,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1854,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1885,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 646c640fc..2dbfa0991 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -178,7 +178,7 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { return types.NewErr("composited variable %q fails to compile: %v", a.name, a.result.Error) } - v, details, err := a.result.Program.Eval(a.activation) + v, details, err := a.result.Program.ContextEval(a.context, a.activation) if details == nil { return types.NewErr("unable to get evaluation details of variable %q", a.name) } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go index b2624694c..9cd3c01ae 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go @@ -180,8 +180,9 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string, 
 			celmetrics.Metrics.ObserveDefinition(context.TODO(), "active", "deny")
 	}
 
-	// Skip reconcile if the spec of the definition is unchanged
-	if info.lastReconciledValue != nil && definition != nil &&
+	// Skip reconcile if the spec of the definition is unchanged and had a
+	// successful previous sync
+	if info.configurationError == nil && info.lastReconciledValue != nil && definition != nil &&
 		apiequality.Semantic.DeepEqual(info.lastReconciledValue.Spec, definition.Spec) {
 		return nil
 	}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go
index 76a0bccee..0c1dee82d 100644
--- a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go
+++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go
@@ -62,10 +62,18 @@ var baseOpts = []VersionedOptions{
 			library.URLs(),
 			library.Regex(),
 			library.Lists(),
+
+			// cel-go v0.17.7 changed the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7.
+			// Since it is a regression fix, we apply it uniformly to all code using v0.17.7.
+			cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)),
 		},
 		ProgramOptions: []cel.ProgramOption{
 			cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost),
 			cel.CostLimit(celconfig.PerCallLimit),
+
+			// cel-go v0.17.7 changed the cost of has() from 0 to 1, but also provided the CostTrackerOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7.
+			// Since it is a regression fix, we apply it uniformly to all code using v0.17.7.
+			cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)),
 		},
 	},
 	{
@@ -113,14 +121,6 @@ var baseOpts = []VersionedOptions{
 		IntroducedVersion: version.MajorMinor(1, 29),
 		EnvOptions: []cel.EnvOption{
 			ext.Sets(),
-			// cel-go v0.17.7 introduced CostEstimatorOptions.
-			// Previous the presence has a cost of 0 but cel fixed it to 1. We still set to 0 here to avoid breaking changes.
-			cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)),
-		},
-		ProgramOptions: []cel.ProgramOption{
-			// cel-go v0.17.7 introduced CostTrackerOptions.
-			// Previous the presence has a cost of 0 but cel fixed it to 1. We still set to 0 here to avoid breaking changes.
-			cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)),
-		},
 	},
 }
diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go
new file mode 100644
index 000000000..ff3537e98
--- /dev/null
+++ b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go
@@ -0,0 +1,215 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package metadatainformer + +import ( + "context" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/informers" + "k8s.io/client-go/metadata" + "k8s.io/client-go/metadata/metadatalister" + "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for metadataSharedInformerFactory. +type SharedInformerOption func(*metadataSharedInformerFactory) *metadataSharedInformerFactory + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *metadataSharedInformerFactory) *metadataSharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of metadataSharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client metadata.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, metav1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of metadataSharedInformerFactory. +// Listers obtained via this factory will be subject to the same filters as specified here. +func NewFilteredSharedInformerFactory(client metadata.Interface, defaultResync time.Duration, namespace string, tweakListOptions TweakListOptionsFunc) SharedInformerFactory { + return &metadataSharedInformerFactory{ + client: client, + defaultResync: defaultResync, + namespace: namespace, + informers: map[schema.GroupVersionResource]informers.GenericInformer{}, + startedInformers: make(map[schema.GroupVersionResource]bool), + tweakListOptions: tweakListOptions, + } +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of metadataSharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client metadata.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &metadataSharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: map[schema.GroupVersionResource]informers.GenericInformer{}, + startedInformers: make(map[schema.GroupVersionResource]bool), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +type metadataSharedInformerFactory struct { + client metadata.Interface + defaultResync time.Duration + namespace string + transform cache.TransformFunc + + lock sync.Mutex + informers map[schema.GroupVersionResource]informers.GenericInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[schema.GroupVersionResource]bool + tweakListOptions TweakListOptionsFunc + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. 
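+	// It is only read and written while holding lock.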
+ shuttingDown bool +} + +var _ SharedInformerFactory = &metadataSharedInformerFactory{} + +func (f *metadataSharedInformerFactory) ForResource(gvr schema.GroupVersionResource) informers.GenericInformer { + f.lock.Lock() + defer f.lock.Unlock() + + key := gvr + informer, exists := f.informers[key] + if exists { + return informer + } + + informer = NewFilteredMetadataInformer(f.client, gvr, f.namespace, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) + informer.Informer().SetTransform(f.transform) + f.informers[key] = informer + + return informer +} + +// Start initializes all requested informers. +func (f *metadataSharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer.Informer() + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *metadataSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool { + informers := func() map[schema.GroupVersionResource]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[schema.GroupVersionResource]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer.Informer() + } + } + return informers + }() + + res := map[schema.GroupVersionResource]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +func (f *metadataSharedInformerFactory) Shutdown() { + // Will return immediately if there is nothing to wait for. + defer f.wg.Wait() + + f.lock.Lock() + defer f.lock.Unlock() + f.shuttingDown = true +} + +// NewFilteredMetadataInformer constructs a new informer for a metadata type. 
+func NewFilteredMetadataInformer(client metadata.Interface, gvr schema.GroupVersionResource, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions TweakListOptionsFunc) informers.GenericInformer { + return &metadataInformer{ + gvr: gvr, + informer: cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).Watch(context.TODO(), options) + }, + }, + &metav1.PartialObjectMetadata{}, + resyncPeriod, + indexers, + ), + } +} + +type metadataInformer struct { + informer cache.SharedIndexInformer + gvr schema.GroupVersionResource +} + +var _ informers.GenericInformer = &metadataInformer{} + +func (d *metadataInformer) Informer() cache.SharedIndexInformer { + return d.informer +} + +func (d *metadataInformer) Lister() cache.GenericLister { + return metadatalister.NewRuntimeObjectShim(metadatalister.New(d.informer.GetIndexer(), d.gvr)) +} diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go new file mode 100644 index 000000000..9f61706cd --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatainformer + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" +) + +// SharedInformerFactory provides access to a shared informer and lister for dynamic client +type SharedInformerFactory interface { + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(gvr schema.GroupVersionResource) informers.GenericInformer + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. 
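	//
	// A hypothetical lifecycle sketch (the client, gvr, and stop-channel
	// names are illustrative):
	//
	//	factory := metadatainformer.NewSharedInformerFactory(metadataClient, 10*time.Minute)
	//	informer := factory.ForResource(gvr)
	//	factory.Start(stopCh)
	//	factory.WaitForCacheSync(stopCh)
	//	// ... use informer.Lister() and informer.Informer() ...
	//	factory.Shutdown()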
+ Shutdown() +} + +// TweakListOptionsFunc defines the signature of a helper function +// that wants to provide more listing options to API +type TweakListOptionsFunc func(*metav1.ListOptions) diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/interface.go b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go new file mode 100644 index 000000000..bb3548589 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// Lister helps list resources. +type Lister interface { + // List lists all resources in the indexer. + List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer with the given name + Get(name string) (*metav1.PartialObjectMetadata, error) + // Namespace returns an object that can list and get resources in a given namespace. + Namespace(namespace string) NamespaceLister +} + +// NamespaceLister helps list and get resources. +type NamespaceLister interface { + // List lists all resources in the indexer for a given namespace. + List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer for a given namespace and name. + Get(name string) (*metav1.PartialObjectMetadata, error) +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/lister.go b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go new file mode 100644 index 000000000..faeccc0fc --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +var _ Lister = &metadataLister{} +var _ NamespaceLister = &metadataNamespaceLister{} + +// metadataLister implements the Lister interface. +type metadataLister struct { + indexer cache.Indexer + gvr schema.GroupVersionResource +} + +// New returns a new Lister. +func New(indexer cache.Indexer, gvr schema.GroupVersionResource) Lister { + return &metadataLister{indexer: indexer, gvr: gvr} +} + +// List lists all resources in the indexer. 
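+//
+// A hypothetical usage sketch (the informer and gvr names are illustrative):
+//
+//	lister := metadatalister.New(informer.GetIndexer(), gvr)
+//	objs, _ := lister.Namespace("default").List(labels.Everything())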
+func (l *metadataLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAll(l.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer with the given name +func (l *metadataLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} + +// Namespace returns an object that can list and get resources from a given namespace. +func (l *metadataLister) Namespace(namespace string) NamespaceLister { + return &metadataNamespaceLister{indexer: l.indexer, namespace: namespace, gvr: l.gvr} +} + +// metadataNamespaceLister implements the NamespaceLister interface. +type metadataNamespaceLister struct { + indexer cache.Indexer + namespace string + gvr schema.GroupVersionResource +} + +// List lists all resources in the indexer for a given namespace. +func (l *metadataNamespaceLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer for a given namespace and name. +func (l *metadataNamespaceLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(l.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/shim.go b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go new file mode 100644 index 000000000..f31c60725 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +var _ cache.GenericLister = &metadataListerShim{} +var _ cache.GenericNamespaceLister = &metadataNamespaceListerShim{} + +// metadataListerShim implements the cache.GenericLister interface. +type metadataListerShim struct { + lister Lister +} + +// NewRuntimeObjectShim returns a new shim for Lister. 
+// It wraps Lister so that it implements cache.GenericLister interface +func NewRuntimeObjectShim(lister Lister) cache.GenericLister { + return &metadataListerShim{lister: lister} +} + +// List will return all objects across namespaces +func (s *metadataListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := s.lister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve assuming that name==key +func (s *metadataListerShim) Get(name string) (runtime.Object, error) { + return s.lister.Get(name) +} + +func (s *metadataListerShim) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &metadataNamespaceListerShim{ + namespaceLister: s.lister.Namespace(namespace), + } +} + +// metadataNamespaceListerShim implements the NamespaceLister interface. +// It wraps NamespaceLister so that it implements cache.GenericNamespaceLister interface +type metadataNamespaceListerShim struct { + namespaceLister NamespaceLister +} + +// List will return all objects in this namespace +func (ns *metadataNamespaceListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := ns.namespaceLister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve by namespace and name +func (ns *metadataNamespaceListerShim) Get(name string) (runtime.Object, error) { + return ns.namespaceLister.Get(name) +} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go index 3d464d12d..39cd2ba28 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go @@ -57,6 +57,7 @@ var ( func Register(registry k8smetrics.KubeRegistry) { registry.Register(healthcheck) registry.Register(healthchecksTotal) + _ = k8smetrics.RegisterProcessStartTime(registry.Register) } func ResetHealthMetrics() { diff --git a/vendor/k8s.io/controller-manager/LICENSE b/vendor/k8s.io/controller-manager/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/k8s.io/controller-manager/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/controller-manager/pkg/informerfactory/informer_factory.go b/vendor/k8s.io/controller-manager/pkg/informerfactory/informer_factory.go new file mode 100644 index 000000000..65f928a02 --- /dev/null +++ b/vendor/k8s.io/controller-manager/pkg/informerfactory/informer_factory.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package informerfactory + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + "k8s.io/client-go/metadata/metadatainformer" +) + +// InformerFactory creates informers for each group version resource. +type InformerFactory interface { + ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) + Start(stopCh <-chan struct{}) +} + +type informerFactory struct { + typedInformerFactory informers.SharedInformerFactory + metadataInformerFactory metadatainformer.SharedInformerFactory +} + +func (i *informerFactory) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + informer, err := i.typedInformerFactory.ForResource(resource) + if err != nil { + return i.metadataInformerFactory.ForResource(resource), nil + } + return informer, nil +} + +func (i *informerFactory) Start(stopCh <-chan struct{}) { + i.typedInformerFactory.Start(stopCh) + i.metadataInformerFactory.Start(stopCh) +} + +// NewInformerFactory creates a new InformerFactory which works with both typed +// resources and metadata-only resources +func NewInformerFactory(typedInformerFactory informers.SharedInformerFactory, metadataInformerFactory metadatainformer.SharedInformerFactory) InformerFactory { + return &informerFactory{ + typedInformerFactory: typedInformerFactory, + metadataInformerFactory: metadataInformerFactory, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go index e59844786..081dae306 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go @@ -326,6 +326,9 @@ func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *com if err != nil { return nil, err } + if config.PostProcessSpec != nil { + return config.PostProcessSpec(a.spec) + } return a.spec, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 2e15e163c..e4ce843b0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -164,6 +164,9 @@ type OpenAPIV3Config struct { // It is an optional function to customize model names. GetDefinitionName func(name string) (string, spec.Extensions) + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error) + // SecuritySchemes is list of all security schemes for OpenAPI service. 
SecuritySchemes spec3.SecuritySchemes diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 799d866d5..9887d185b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { } } - if union.Discriminator != nil && len(union.Fields) == 0 { - return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) - } return union, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index a25ed314b..a5430be0c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -444,7 +444,7 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.19.0 +# golang.org/x/crypto v0.21.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -461,7 +461,7 @@ golang.org/x/crypto/scrypt ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.21.0 +# golang.org/x/net v0.23.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -484,14 +484,14 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.17.0 +# golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.17.0 +# golang.org/x/term v0.18.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -802,7 +802,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.0 +# k8s.io/apiserver v0.29.2 ## explicit; go 1.21 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1226,6 +1226,8 @@ k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 k8s.io/client-go/metadata +k8s.io/client-go/metadata/metadatainformer +k8s.io/client-go/metadata/metadatalister k8s.io/client-go/openapi k8s.io/client-go/openapi/cached k8s.io/client-go/pkg/apis/clientauthentication @@ -1263,7 +1265,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.29.1 +# k8s.io/component-base v0.29.2 ## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/featuregate @@ -1282,6 +1284,9 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version +# k8s.io/controller-manager v0.29.2 +## explicit; go 1.21 +k8s.io/controller-manager/pkg/informerfactory # k8s.io/klog/v2 v2.120.1 ## explicit; go 1.18 k8s.io/klog/v2 @@ -1291,14 +1296,14 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kms v0.29.0 +# k8s.io/kms v0.29.2 ## explicit; go 1.21 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 -## explicit; go 1.19 +# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +## explicit; go 1.20 k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 
k8s.io/kube-openapi/pkg/builder3/util @@ -1391,7 +1396,7 @@ open-cluster-management.io/api/utils/work/v1/workapplier open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 -# open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 +# open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 => github.com/morvencao/ocm-sdk-go v0.0.0-20240506074627-941a4e4cfefb ## explicit; go 1.21 open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder @@ -1409,6 +1414,7 @@ open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler open-cluster-management.io/sdk-go/pkg/cloudevents/work/common +open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client @@ -1453,3 +1459,4 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.12 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 +# open-cluster-management.io/sdk-go => github.com/morvencao/ocm-sdk-go v0.0.0-20240506074627-941a4e4cfefb diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go index 0ba5c7187..8f9c71849 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go @@ -129,8 +129,6 @@ func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error { return err } - klog.V(4).Infof("Sent event: %v\n%s", ctx, evt) - // make sure the current client is the newest c.RLock() defer c.RUnlock() @@ -139,6 +137,7 @@ func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error { return fmt.Errorf("the cloudevents client is not ready") } + klog.V(4).Infof("Sending event: %v\n%s", sendingCtx, evt) if result := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) { return fmt.Errorf("failed to send event %s, %v", evt, result) } @@ -167,6 +166,7 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { if cloudEventsClient != nil { go func() { if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) { + klog.V(4).Infof("Received event: %s", evt) receive(receiverCtx, evt) }); err != nil { runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err)) diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/garbagecollector.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/garbagecollector.go new file mode 100644 index 000000000..e725550f6 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/garbagecollector.go @@ -0,0 +1,709 @@ +package garbagecollector + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + // import known versions + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + 
"k8s.io/client-go/util/workqueue" + "k8s.io/controller-manager/pkg/informerfactory" + "k8s.io/klog/v2" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/apimachinery/pkg/util/wait" + + workv1 "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + workapiv1 "open-cluster-management.io/api/work/v1" +) + +type eventType int + +func (e eventType) String() string { + switch e { + case addEvent: + return "add" + case updateEvent: + return "update" + case deleteEvent: + return "delete" + default: + return fmt.Sprintf("unknown(%d)", int(e)) + } +} + +const ( + addEvent eventType = iota + updateEvent + deleteEvent +) + +type event struct { + eventType eventType + obj interface{} + // the update event comes with an old object, but it's not used by the garbage collector. + oldObj interface{} + gvk schema.GroupVersionKind +} + +// monitor runs a Controller with a local stop channel. +type monitor struct { + controller cache.Controller + + // stopCh stops Controller. If stopCh is nil, the monitor is considered to be + // not yet started. + stopCh chan struct{} +} + +// Run is intended to be called in a goroutine. Multiple calls of this is an error. +func (m *monitor) Run() { + m.controller.Run(m.stopCh) +} + +type monitors map[schema.GroupVersionResource]*monitor + +type dependent struct { + ownerUID types.UID + namespacedName types.NamespacedName +} + +type GarbageCollector struct { + workClient workv1.WorkV1Interface + // workInformer from cloudevents client builder + workInformer workv1informers.ManifestWorkInformer + + // metadataClient is used to operator on the owner resources. + // metadataClient metadata.Interface + // sharedInformers is used to create informers for the owner resources. + sharedInformers informerfactory.InformerFactory + restMapper meta.RESTMapper + + // each monitor list/watches a resource (including manifestwork), + // the results are funneled to the ownerChanges + monitors monitors + monitorLock sync.RWMutex + + // monitors are the producer of the ownerChanges queue, garbage collector alters + // the in-memory owner relationship according to the changes. + ownerChanges workqueue.RateLimitingInterface + + // owners is a map of resource to the number of manifestworks that it owns. + owners map[schema.GroupVersionResource]int + ownersLock sync.RWMutex + + // ownerToDependents is a map of owner UID to the UIDs of the dependent manifestworks. + ownerToDependents map[types.UID][]types.NamespacedName + ownerToDependentsLock sync.RWMutex + + // garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe. + attemptToDelete workqueue.RateLimitingInterface + // garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items. + attemptToOrphan workqueue.RateLimitingInterface + + // workerLock ensures workers are paused to avoid processing events before informers have resynced. 
+ workerLock sync.RWMutex +} + +func NewGarbageCollector( + workClient workv1.WorkV1Interface, + // metadataClient metadata.Interface, + restMapper meta.RESTMapper, + workInformer workv1informers.ManifestWorkInformer, + sharedInformers informerfactory.InformerFactory) *GarbageCollector { + return &GarbageCollector{ + workClient: workClient, + // metadataClient: metadataClient, + restMapper: restMapper, + workInformer: workInformer, + sharedInformers: sharedInformers, + owners: make(map[schema.GroupVersionResource]int), + ownerToDependents: make(map[types.UID][]types.NamespacedName), + ownerChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_owner_changes"), + attemptToDelete: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete"), + attemptToOrphan: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan"), + } +} + +// Run starts garbage collector monitors and workers. +func (gc *GarbageCollector) Run(ctx context.Context, workers int) { + defer gc.attemptToDelete.ShutDown() + defer gc.attemptToOrphan.ShutDown() + defer gc.ownerChanges.ShutDown() + + logger := klog.FromContext(ctx) + logger.Info("Starting garbage collector") + defer logger.Info("Shutting down garbage collector") + + go gc.runMonitors(ctx, logger) + + if !cache.WaitForNamedCacheSync("garbage collector", ctx.Done(), func() bool { + return gc.HasSynced(logger) + }) { + return + } + + logger.Info("All resource monitors have synced. Proceeding to collect garbage") + + // run gc workers + for i := 0; i < workers; i++ { + go wait.UntilWithContext(ctx, gc.runAttemptToDeleteWorker, 1*time.Second) + go wait.UntilWithContext(ctx, gc.runAttemptToOrphanWorker, 1*time.Second) + } + + <-ctx.Done() +} + +func (gc *GarbageCollector) runMonitors(ctx context.Context, logger klog.Logger) { + // add monitor for manifestwork + gc.monitorLock.Lock() + gc.monitors = make(monitors) + gc.monitors[workapiv1.SchemeGroupVersion.WithResource("manifestworks")] = gc.manifestWorkMonitor() + gc.monitorLock.Unlock() + + gc.startMonitors(ctx, logger) + + // blocking call + wait.UntilWithContext(ctx, gc.runProcessOwnerChangesWorker, 1*time.Second) + + gc.monitorLock.Lock() + defer gc.monitorLock.Unlock() + monitors := gc.monitors + stopped := 0 + for _, monitor := range monitors { + if monitor.stopCh != nil { + stopped++ + close(monitor.stopCh) + } + } + + // reset monitors so that the garbage collector can be safely re-run. 
+	gc.monitors = nil
+
+	logger.V(4).Info("stopped monitors", "stopped", stopped, "total", len(monitors))
+}
+
+func (gc *GarbageCollector) startMonitors(ctx context.Context, logger klog.Logger) {
+	logger.Info("Starting monitors")
+	gc.monitorLock.Lock()
+	defer gc.monitorLock.Unlock()
+	monitors := gc.monitors
+	started := 0
+	for _, monitor := range monitors {
+		if monitor.stopCh == nil {
+			monitor.stopCh = make(chan struct{})
+			gc.sharedInformers.Start(ctx.Done())
+			go monitor.Run()
+			started++
+		}
+	}
+	logger.V(4).Info("started new monitors", "started", started, "total", len(monitors))
+}
+
+func (gc *GarbageCollector) runProcessOwnerChangesWorker(ctx context.Context) {
+	for gc.processOwnerChangesWorker(ctx) {
+	}
+}
+
+func (gc *GarbageCollector) processOwnerChangesWorker(ctx context.Context) bool {
+	item, quit := gc.ownerChanges.Get()
+	if quit {
+		return false
+	}
+	defer gc.ownerChanges.Done(item)
+	event, ok := item.(*event)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("expected a *event, got %v", item))
+		return true
+	}
+	obj := event.obj
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
+		return true
+	}
+
+	var oldAccessor metav1.Object
+	if event.eventType == updateEvent {
+		oldAccessor, err = meta.Accessor(event.oldObj)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
+			return true
+		}
+	}
+
+	if event.gvk.String() == workapiv1.SchemeGroupVersion.WithKind("ManifestWork").String() {
+		addedOwnerResource := make([]schema.GroupVersionResource, 0)
+		removedOwnerResource := make([]schema.GroupVersionResource, 0)
+		owners := accessor.GetOwnerReferences()
+
+		switch {
+		case event.eventType == addEvent:
+			for _, owner := range owners {
+				gc.ownerToDependentsLock.Lock()
+				if _, exist := gc.ownerToDependents[owner.UID]; !exist {
+					gc.ownerToDependents[owner.UID] = make([]types.NamespacedName, 0)
+					// TODO: add finalizer to owner object to block owner deletion
+				}
+				gc.ownerToDependents[owner.UID] = append(gc.ownerToDependents[owner.UID], types.NamespacedName{Namespace: accessor.GetNamespace(), Name: accessor.GetName()})
+				gc.ownerToDependentsLock.Unlock()
+
+				ownerGVK := schema.FromAPIVersionAndKind(owner.APIVersion, owner.Kind)
+				mapping, err := gc.restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version)
+				if err != nil {
+					utilruntime.HandleError(fmt.Errorf("cannot get mapping for %v: %v", ownerGVK, err))
+				}
+				gc.ownersLock.Lock()
+				if _, exist := gc.owners[mapping.Resource]; !exist {
+					addedOwnerResource = append(addedOwnerResource, mapping.Resource)
+					gc.owners[mapping.Resource] = 1
+				} else {
+					gc.owners[mapping.Resource] = gc.owners[mapping.Resource] + 1
+				}
+				gc.ownersLock.Unlock()
+			}
+		case event.eventType == updateEvent:
+			oldOwners := oldAccessor.GetOwnerReferences()
+			added, removed, _ := ownerReferencesDiffs(oldOwners, owners)
+			for _, owner := range added {
+				gc.ownerToDependentsLock.Lock()
+				if _, exist := gc.ownerToDependents[owner.UID]; !exist {
+					gc.ownerToDependents[owner.UID] = make([]types.NamespacedName, 0)
+					// TODO: add finalizer to owner object to block owner deletion
+				}
+				gc.ownerToDependents[owner.UID] = append(gc.ownerToDependents[owner.UID], types.NamespacedName{Namespace: accessor.GetNamespace(), Name: accessor.GetName()})
+				gc.ownerToDependentsLock.Unlock()
+
+				ownerGVK := schema.FromAPIVersionAndKind(owner.APIVersion, owner.Kind)
+				mapping, err := gc.restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version)
+				if err != nil {
+
+ gc.ownersLock.Lock()
+ if _, exist := gc.owners[mapping.Resource]; !exist {
+ addedOwnerResource = append(addedOwnerResource, mapping.Resource)
+ gc.owners[mapping.Resource] = 1
+ } else {
+ gc.owners[mapping.Resource]++
+ }
+ gc.ownersLock.Unlock()
+ }
+ for _, owner := range removed {
+ gc.ownerToDependentsLock.Lock()
+ if _, exist := gc.ownerToDependents[owner.UID]; exist {
+ for i, namespacedName := range gc.ownerToDependents[owner.UID] {
+ if namespacedName.Name == accessor.GetName() && namespacedName.Namespace == accessor.GetNamespace() {
+ gc.ownerToDependents[owner.UID] = append(gc.ownerToDependents[owner.UID][:i], gc.ownerToDependents[owner.UID][i+1:]...)
+ break
+ }
+ }
+ }
+ gc.ownerToDependentsLock.Unlock()
+
+ ownerGVK := schema.FromAPIVersionAndKind(owner.APIVersion, owner.Kind)
+ mapping, err := gc.restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version)
+ if err != nil {
+ utilruntime.HandleError(fmt.Errorf("cannot get mapping for %v: %v", ownerGVK, err))
+ // without a mapping the owner refcount cannot be updated; skip this owner
+ continue
+ }
+
+ // take the lock before reading the refcount to avoid racing with
+ // concurrent owner additions
+ gc.ownersLock.Lock()
+ if _, exist := gc.owners[mapping.Resource]; exist {
+ gc.owners[mapping.Resource]--
+ if gc.owners[mapping.Resource] == 0 {
+ removedOwnerResource = append(removedOwnerResource, mapping.Resource)
+ delete(gc.owners, mapping.Resource)
+ }
+ }
+ gc.ownersLock.Unlock()
+ }
+ // TODO: handle changed owner references
+ case event.eventType == deleteEvent:
+ // TODO: block deletion of manifestwork if the owner is not being deleted
+ for _, owner := range owners {
+ gc.ownerToDependentsLock.Lock()
+ if _, exist := gc.ownerToDependents[owner.UID]; exist {
+ for i, namespacedName := range gc.ownerToDependents[owner.UID] {
+ if namespacedName.Name == accessor.GetName() && namespacedName.Namespace == accessor.GetNamespace() {
+ gc.ownerToDependents[owner.UID] = append(gc.ownerToDependents[owner.UID][:i], gc.ownerToDependents[owner.UID][i+1:]...)
+ break
+ }
+ }
+ }
+ gc.ownerToDependentsLock.Unlock()
+
+ ownerGVK := schema.FromAPIVersionAndKind(owner.APIVersion, owner.Kind)
+ mapping, err := gc.restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version)
+ if err != nil {
+ utilruntime.HandleError(fmt.Errorf("cannot get mapping for %v: %v", ownerGVK, err))
+ continue
+ }
+ gc.ownersLock.Lock()
+ if _, exist := gc.owners[mapping.Resource]; exist {
+ gc.owners[mapping.Resource]--
+ if gc.owners[mapping.Resource] == 0 {
+ removedOwnerResource = append(removedOwnerResource, mapping.Resource)
+ delete(gc.owners, mapping.Resource)
+ }
+ }
+ gc.ownersLock.Unlock()
+ }
+ }
+
+ // sync monitors for the added and removed owner resources
+ logger := klog.FromContext(ctx)
+ if err := gc.syncMonitors(ctx, logger, addedOwnerResource, removedOwnerResource); err != nil {
+ utilruntime.HandleError(fmt.Errorf("failed to sync monitors: %v", err))
+ }
+ } else {
+ // only handle the delete event for owner resources
+ if event.eventType == deleteEvent {
+ gc.ownerToDependentsLock.RLock()
+ dependents, exist := gc.ownerToDependents[accessor.GetUID()]
+ gc.ownerToDependentsLock.RUnlock()
+ if exist {
+ for _, namespacedName := range dependents {
+ gc.attemptToDelete.Add(&dependent{ownerUID: accessor.GetUID(), namespacedName: namespacedName})
+ }
+ }
+ }
+ }
+
+ return true
+}
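+
+// Informal sketch of the event flow handled above:
+//
+//   ManifestWork add:    record each owner's dependent, count the owner GVR,
+//                        and start a monitor for newly seen owner resources.
+//   ManifestWork update: diff the old and new owner references and apply the
+//                        add and remove handling to the difference.
+//   ManifestWork delete: drop the dependent entries and stop monitors whose
+//                        owner GVR refcount reaches zero.
+//   owner delete:        enqueue every recorded dependent into attemptToDelete.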
+
+// func (gc *GarbageCollector) addFinalizerForOwner(ctx context.Context, logger klog.Logger, owner metav1.OwnerReference) {
+// ownerGVK := schema.FromAPIVersionAndKind(owner.APIVersion, owner.Kind)
+// mapping, err := gc.restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version)
+// if err != nil {
+// utilruntime.HandleError(fmt.Errorf("cannot get mapping for %v: %v", ownerGVK, err))
+// return
+// }
+// }
+
+func (gc *GarbageCollector) syncMonitors(ctx context.Context, logger klog.Logger, addedOwnerResource, removedOwnerResource []schema.GroupVersionResource) error {
+ // Ensure workers are paused to avoid processing events before informers
+ // have resynced.
+ gc.workerLock.Lock()
+ defer gc.workerLock.Unlock()
+
+ // Mutate the monitor map under monitorLock, but release it before calling
+ // startMonitors and HasSynced below, both of which take the same
+ // (non-reentrant) lock themselves.
+ gc.monitorLock.Lock()
+ monitors := gc.monitors
+ errs := []error{}
+ for _, resource := range addedOwnerResource {
+ if _, exist := monitors[resource]; !exist {
+ kind, err := gc.restMapper.KindFor(resource)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("couldn't look up resource %q: %v", resource, err))
+ continue
+ }
+ c, err := gc.controllerFor(logger, resource, kind)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("couldn't start monitor for resource %q: %v", resource, err))
+ continue
+ }
+ monitors[resource] = &monitor{controller: c}
+ }
+ }
+
+ if err := utilerrors.NewAggregate(errs); err != nil {
+ gc.monitorLock.Unlock()
+ return err
+ }
+
+ for _, resource := range removedOwnerResource {
+ if monitor, exist := monitors[resource]; exist {
+ // the stop channel is nil if the monitor was never started
+ if monitor.stopCh != nil {
+ close(monitor.stopCh)
+ }
+ delete(monitors, resource)
+ }
+ }
+ gc.monitorLock.Unlock()
+
+ logger.V(4).Info("synced monitors", "added", len(addedOwnerResource), "removed", len(removedOwnerResource))
+
+ // start monitors
+ gc.startMonitors(ctx, logger)
+
+ if !cache.WaitForNamedCacheSync("garbage collector", ctx.Done(), func() bool {
+ return gc.HasSynced(logger)
+ }) {
+ return fmt.Errorf("timed out waiting for caches to sync")
+ }
+
+ return nil
+}
+
+func (gc *GarbageCollector) controllerFor(logger klog.Logger, resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, error) {
+ handlers := cache.ResourceEventHandlerFuncs{
+ // add the event to the garbage collector's ownerChanges queue.
+ AddFunc: func(obj interface{}) {
+ event := &event{
+ eventType: addEvent,
+ obj: obj,
+ gvk: kind,
+ }
+ gc.ownerChanges.Add(event)
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ // TODO: check if there are differences in the ownerRefs,
+ // finalizers, and DeletionTimestamp; if not, ignore the update.
+ event := &event{
+ eventType: updateEvent,
+ obj: newObj,
+ oldObj: oldObj,
+ gvk: kind,
+ }
+ gc.ownerChanges.Add(event)
+ },
+ DeleteFunc: func(obj interface{}) {
+ // delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
+ if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+ obj = deletedFinalStateUnknown.Obj
+ }
+ event := &event{
+ eventType: deleteEvent,
+ obj: obj,
+ gvk: kind,
+ }
+ gc.ownerChanges.Add(event)
+ },
+ }
+
+ shared, err := gc.sharedInformers.ForResource(resource)
+ if err != nil {
+ logger.V(4).Error(err, "unable to use a shared informer", "resource", resource, "kind", kind)
+ return nil, err
+ }
+ logger.V(4).Info("using a shared informer", "resource", resource, "kind", kind)
+ // register the handlers on the shared informer; a zero resync period means
+ // no periodic replay of the cache
+ if _, err := shared.Informer().AddEventHandlerWithResyncPeriod(handlers, 0); err != nil {
+ return nil, err
+ }
+ return shared.Informer().GetController(), nil
+}
+
+type ownerReferenceChange struct {
+ oldOwnerRef metav1.OwnerReference
+ newOwnerRef metav1.OwnerReference
+}
+
+func ownerReferencesDiffs(oldRefs []metav1.OwnerReference, newRefs []metav1.OwnerReference) (added []metav1.OwnerReference, removed []metav1.OwnerReference, changed []ownerReferenceChange) {
+ oldUIDToRef := make(map[string]metav1.OwnerReference)
+ for _, value := range oldRefs {
+ oldUIDToRef[string(value.UID)] = value
+ }
+ oldUIDSet := sets.StringKeySet(oldUIDToRef)
+ for _, value := range newRefs {
+ newUID := string(value.UID)
+ if oldUIDSet.Has(newUID) {
+ if !reflect.DeepEqual(oldUIDToRef[newUID], value) {
+ changed = append(changed, ownerReferenceChange{oldOwnerRef: oldUIDToRef[newUID], newOwnerRef: value})
+ }
+ oldUIDSet.Delete(newUID)
+ } else {
+ added = append(added, value)
+ }
+ }
+ for oldUID := range oldUIDSet {
+ removed = append(removed, oldUIDToRef[oldUID])
+ }
+
+ return added, removed, changed
+}
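+
+// For illustration, a hypothetical example of ownerReferencesDiffs (all
+// values are made up):
+//
+//   oldRefs := []metav1.OwnerReference{{UID: "a"}, {UID: "b"}}
+//   newRefs := []metav1.OwnerReference{{UID: "b", Name: "renamed"}, {UID: "c"}}
+//   added, removed, changed := ownerReferencesDiffs(oldRefs, newRefs)
+//   // added   == [{UID: "c"}]                 (only in newRefs)
+//   // removed == [{UID: "a"}]                 (only in oldRefs)
+//   // changed == [{old: {UID: "b"}, new: {UID: "b", Name: "renamed"}}]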
+
+// HasSynced returns true if any monitors exist AND all those monitors'
+// controllers HasSynced functions return true. This means HasSynced could return
+// true at one time, and then later return false if all monitors were
+// reconstructed.
+func (gc *GarbageCollector) HasSynced(logger klog.Logger) bool {
+ gc.monitorLock.Lock()
+ defer gc.monitorLock.Unlock()
+
+ if len(gc.monitors) == 0 {
+ logger.V(4).Info("garbage collector monitor not synced: no monitors")
+ return false
+ }
+
+ for resource, monitor := range gc.monitors {
+ if !monitor.controller.HasSynced() {
+ logger.V(4).Info("garbage collector monitor not yet synced", "resource", resource)
+ return false
+ }
+ }
+ return true
+}
+
+func (gc *GarbageCollector) runAttemptToDeleteWorker(ctx context.Context) {
+ for gc.processAttemptToDeleteWorker(ctx) {
+ }
+}
+
+func (gc *GarbageCollector) runAttemptToOrphanWorker(ctx context.Context) {
+ for gc.processAttemptToOrphanWorker(ctx) {
+ }
+}
+
+func (gc *GarbageCollector) processAttemptToDeleteWorker(ctx context.Context) bool {
+ item, quit := gc.attemptToDelete.Get()
+ gc.workerLock.RLock()
+ defer gc.workerLock.RUnlock()
+ if quit {
+ return false
+ }
+ defer gc.attemptToDelete.Done(item)
+
+ action := gc.attemptToDeleteWorker(ctx, item)
+ switch action {
+ case forgetItem:
+ gc.attemptToDelete.Forget(item)
+ case requeueItem:
+ gc.attemptToDelete.AddRateLimited(item)
+ }
+
+ return true
+}
+
+type workQueueItemAction int
+
+const (
+ requeueItem workQueueItemAction = iota
+ forgetItem
+)
+
+func (gc *GarbageCollector) attemptToDeleteOrOrphan(ctx context.Context, item interface{}, orphan bool) workQueueItemAction {
+ dep, ok := item.(*dependent)
+ if !ok {
+ utilruntime.HandleError(fmt.Errorf("expect a *dependent, got %v", item))
+ return forgetItem
+ }
+
+ latest, err := gc.getManifestwork(ctx, dep.namespacedName)
+ if err != nil {
+ if errors.IsNotFound(err) {
+ // the manifestwork is already gone; nothing left to do
+ return forgetItem
+ }
+ return requeueItem
+ }
+
+ // no owner references left; this may happen when a previous attempt removed
+ // the references but failed to delete the manifestwork itself
+ ownerReferences := latest.GetOwnerReferences()
+ if len(ownerReferences) == 0 {
+ if err := gc.deleteManifestwork(ctx, latest.GetUID(), dep.namespacedName, metav1.DeletePropagationBackground); err != nil {
+ return requeueItem
+ }
+ return forgetItem
+ }
+
+ found := false
+ for _, owner := range ownerReferences {
+ if owner.UID == dep.ownerUID {
+ found = true
+ break
+ }
+ }
+
+ if found {
+ smp, err := generateDeleteOwnerRefStrategicMergeBytes(latest.GetUID(), dep.ownerUID)
+ if err != nil {
+ return requeueItem
+ }
+
+ if _, err := gc.patchManifestwork(ctx, dep.namespacedName, smp, types.StrategicMergePatchType); err != nil {
+ if !errors.IsUnsupportedMediaType(err) {
+ return requeueItem
+ }
+ // StrategicMergePatch is not supported, use JSON merge patch instead
+ jmp, err := generateDeleteOwnerRefJSONMergePatch(latest, dep.ownerUID)
+ if err != nil {
+ return requeueItem
+ }
+ if _, err = gc.patchManifestwork(ctx, dep.namespacedName, jmp, types.MergePatchType); err != nil {
+ return requeueItem
+ }
+ }
+
+ // the removed owner reference was the manifestwork's only owner; unless
+ // the intent is to orphan it, delete the manifestwork as well.
+ if len(ownerReferences) == 1 && !orphan {
+ // if the manifestwork cannot be deleted, requeue it.
+ if err := gc.deleteManifestwork(ctx, latest.GetUID(), dep.namespacedName, metav1.DeletePropagationBackground); err != nil {
+ return requeueItem
+ }
+ }
+ }
+
+ return forgetItem
+}
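+
+// Summary of the outcomes above (informal):
+//
+//   no owner references left            -> delete the manifestwork
+//   owner ref already removed           -> nothing to do
+//   owner ref found, >1 owners          -> patch the owner ref away, keep it
+//   owner ref found, sole owner, delete -> patch, then delete the manifestwork
+//   owner ref found, sole owner, orphan -> patch only, the manifestwork survives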
+
+func (gc *GarbageCollector) attemptToDeleteWorker(ctx context.Context, item interface{}) workQueueItemAction {
+ return gc.attemptToDeleteOrOrphan(ctx, item, false)
+}
+
+func (gc *GarbageCollector) processAttemptToOrphanWorker(ctx context.Context) bool {
+ item, quit := gc.attemptToOrphan.Get()
+ gc.workerLock.RLock()
+ defer gc.workerLock.RUnlock()
+ if quit {
+ return false
+ }
+ defer gc.attemptToOrphan.Done(item)
+
+ action := gc.attemptToOrphanWorker(ctx, item)
+ switch action {
+ case forgetItem:
+ gc.attemptToOrphan.Forget(item)
+ case requeueItem:
+ gc.attemptToOrphan.AddRateLimited(item)
+ }
+
+ return true
+}
+
+func (gc *GarbageCollector) attemptToOrphanWorker(ctx context.Context, item interface{}) workQueueItemAction {
+ return gc.attemptToDeleteOrOrphan(ctx, item, true)
+}
+
+func (gc *GarbageCollector) manifestWorkMonitor() *monitor {
+ kind := workapiv1.SchemeGroupVersion.WithKind("ManifestWork")
+ handlers := cache.ResourceEventHandlerFuncs{
+ // add the event to the garbage collector's ownerChanges queue.
+ AddFunc: func(obj interface{}) {
+ event := &event{
+ eventType: addEvent,
+ obj: obj,
+ gvk: kind,
+ }
+ gc.ownerChanges.Add(event)
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ // TODO: check if there are differences in the ownerRefs,
+ // finalizers, and DeletionTimestamp; if not, ignore the update.
+ event := &event{
+ eventType: updateEvent,
+ obj: newObj,
+ oldObj: oldObj,
+ gvk: kind,
+ }
+ gc.ownerChanges.Add(event)
+ },
+ DeleteFunc: func(obj interface{}) {
+ // delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
+ if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+ obj = deletedFinalStateUnknown.Obj
+ }
+ event := &event{
+ eventType: deleteEvent,
+ obj: obj,
+ gvk: kind,
+ }
+ gc.ownerChanges.Add(event)
+ },
+ }
+
+ if _, err := gc.workInformer.Informer().AddEventHandlerWithResyncPeriod(handlers, 0); err != nil {
+ utilruntime.HandleError(fmt.Errorf("failed to add event handlers for manifestworks: %v", err))
+ }
+ return &monitor{
+ controller: gc.workInformer.Informer().GetController(),
+ }
+}
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/operations.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/operations.go
new file mode 100644
index 000000000..272a5e3d2
--- /dev/null
+++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/garbagecollector/operations.go
@@ -0,0 +1,264 @@
+package garbagecollector
+
+import (
+ "context"
+ "encoding/json"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ workapiv1 "open-cluster-management.io/api/work/v1"
+)
+
+func (gc *GarbageCollector) getManifestwork(ctx context.Context, namespacedName types.NamespacedName) (*workapiv1.ManifestWork, error) {
+ return gc.workClient.ManifestWorks(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
+}
+
+func (gc *GarbageCollector) patchManifestwork(ctx context.Context, namespacedName types.NamespacedName, patch []byte, pt types.PatchType) (*workapiv1.ManifestWork, error) {
+ return gc.workClient.ManifestWorks(namespacedName.Namespace).Patch(ctx, namespacedName.Name, pt, patch, metav1.PatchOptions{})
+}
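+
+// deleteManifestwork below sends a UID precondition with the delete request:
+// if the manifestwork was deleted and recreated in the meantime (and thus has
+// a different UID), the API server rejects the delete with a Conflict instead
+// of removing the new object.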
+
+func (gc *GarbageCollector) deleteManifestwork(ctx context.Context, uid types.UID, namespacedName types.NamespacedName, policy metav1.DeletionPropagation) error {
+ preconditions := metav1.Preconditions{UID: &uid}
+ deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions, PropagationPolicy: &policy}
+ return gc.workClient.ManifestWorks(namespacedName.Namespace).Delete(ctx, namespacedName.Name, deleteOptions)
+}
+
+// type restMappingError struct {
+// kind string
+// version string
+// }
+
+// func (r *restMappingError) Error() string {
+// versionKind := fmt.Sprintf("%s/%s", r.version, r.kind)
+// return fmt.Sprintf("unable to get REST mapping for %s.", versionKind)
+// }
+
+// // Message prints more details
+// func (r *restMappingError) Message() string {
+// versionKind := fmt.Sprintf("%s/%s", r.version, r.kind)
+// errMsg := fmt.Sprintf("unable to get REST mapping for %s. ", versionKind)
+// errMsg += fmt.Sprintf(" If %s is an invalid resource, then you should manually remove ownerReferences that refer to %s objects.", versionKind, versionKind)
+// return errMsg
+// }
+
+// func newRESTMappingError(kind, version string) *restMappingError {
+// return &restMappingError{kind: kind, version: version}
+// }
+
+// type ownerObjectReference struct {
+// metav1.OwnerReference
+// namespace string
+// }
+
+// // String is used when logging an ownerObjectReference in text format.
+// func (o ownerObjectReference) String() string {
+// return fmt.Sprintf("[%s/%s, namespace: %s, name: %s, uid: %s]", o.APIVersion, o.Kind, o.namespace, o.Name, o.UID)
+// }
+
+// // cluster scoped owner resources don't have namespaces; namespace scoped owner resources default to the manifestwork's namespace
+// func resourceDefaultNamespace(namespaced bool, defaultNamespace string) string {
+// if namespaced {
+// return defaultNamespace
+// }
+// return ""
+// }
+
+// // apiResource consults the REST mapper to translate an <apiVersion, kind>
+// // tuple to a unversioned.APIResource struct.
+// func (gc *GarbageCollector) apiResource(apiVersion, kind string) (schema.GroupVersionResource, bool, error) {
+// fqKind := schema.FromAPIVersionAndKind(apiVersion, kind)
+// mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), fqKind.Version)
+// if err != nil {
+// return schema.GroupVersionResource{}, false, newRESTMappingError(kind, apiVersion)
+// }
+// return mapping.Resource, mapping.Scope == meta.RESTScopeNamespace, nil
+// }
+
+// func (gc *GarbageCollector) getOwnerObject(ctx context.Context, owner ownerObjectReference) (*metav1.PartialObjectMetadata, error) {
+// resource, namespaced, err := gc.apiResource(owner.APIVersion, owner.Kind)
+// if err != nil {
+// return nil, err
+// }
+// namespace := resourceDefaultNamespace(namespaced, owner.namespace)
+// if namespaced && len(namespace) == 0 {
+// // the owner type is namespaced, but we have no namespace coordinate.
+// return nil, fmt.Errorf("empty namespace for namespace scoped owner object %s/%s - %s/%s", owner.APIVersion, owner.Kind, namespace, owner.Name)
+// }
+// return gc.metadataClient.Resource(resource).Namespace(namespace).Get(ctx, owner.Name, metav1.GetOptions{})
+// }
+
+// func (gc *GarbageCollector) patchOwnerObject(ctx context.Context, owner ownerObjectReference, patch []byte, pt types.PatchType) (*metav1.PartialObjectMetadata, error) {
+// resource, namespaced, err := gc.apiResource(owner.APIVersion, owner.Kind)
+// if err != nil {
+// return nil, err
+// }
+// namespace := resourceDefaultNamespace(namespaced, owner.namespace)
+// if namespaced && len(namespace) == 0 {
+// // the owner type is namespaced, but we have no namespace coordinate.
+// return nil, fmt.Errorf("empty namespace for namespace scoped owner object %s/%s - %s/%s", owner.APIVersion, owner.Kind, namespace, owner.Name)
+// }
+// return gc.metadataClient.Resource(resource).Namespace(namespace).Patch(ctx, owner.Name, pt, patch, metav1.PatchOptions{})
+// }
+
+// func (gc *GarbageCollector) deleteOwnerObject(ctx context.Context, owner ownerObjectReference, policy *metav1.DeletionPropagation) error {
+// resource, namespaced, err := gc.apiResource(owner.APIVersion, owner.Kind)
+// if err != nil {
+// return err
+// }
+// namespace := resourceDefaultNamespace(namespaced, owner.namespace)
+// if namespaced && len(namespace) == 0 {
+// // the owner type is namespaced, but we have no namespace coordinate.
+// return fmt.Errorf("empty namespace for namespace scoped owner object %s/%s - %s/%s", owner.APIVersion, owner.Kind, namespace, owner.Name)
+// }
+// uid := owner.UID
+// preconditions := metav1.Preconditions{UID: &uid}
+// deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions, PropagationPolicy: policy}
+// return gc.metadataClient.Resource(resource).Namespace(namespace).Delete(ctx, owner.Name, deleteOptions)
+// }
+
+// func (gc *GarbageCollector) removeFinalizer(ctx context.Context, owner ownerObjectReference, targetFinalizer string) error {
+// err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+// ownerObject, err := gc.getOwnerObject(ctx, owner)
+// if errors.IsNotFound(err) {
+// return nil
+// }
+// if err != nil {
+// return fmt.Errorf("cannot finalize owner %s because it cannot be retrieved: %v. The garbage collector will retry later", &owner, err)
+// }
+// accessor, err := meta.Accessor(ownerObject)
+// if err != nil {
+// return fmt.Errorf("cannot access the owner object %v: %v. The garbage collector will retry later", ownerObject, err)
+// }
+// finalizers := accessor.GetFinalizers()
+// var newFinalizers []string
+// found := false
+// for _, f := range finalizers {
+// if f == targetFinalizer {
+// found = true
+// continue
+// }
+// newFinalizers = append(newFinalizers, f)
+// }
+// if !found {
+// // logger.V(5).Info("finalizer already removed from object", "finalizer", targetFinalizer, "object", owner)
+// return nil
+// }
+
+// // remove the finalizer
+// patch, err := json.Marshal(&objectForFinalizersPatch{
+// ObjectMetaForFinalizersPatch: ObjectMetaForFinalizersPatch{
+// ResourceVersion: accessor.GetResourceVersion(),
+// Finalizers: newFinalizers,
+// },
+// })
+// if err != nil {
+// return fmt.Errorf("unable to finalize %s due to an error serializing patch: %v", owner, err)
+// }
+// _, err = gc.patchOwnerObject(ctx, owner, patch, types.MergePatchType)
+// return err
+// })
+// if errors.IsConflict(err) {
+// return fmt.Errorf("the maximum number of retries (%d) has been reached. The garbage collector will retry later for owner %v", retry.DefaultBackoff.Steps, owner)
+// }
+// return err
+// }
+
+// func (gc *GarbageCollector) addFinalizer(ctx context.Context, owner ownerObjectReference, targetFinalizer string) error {
+// err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+// ownerObject, err := gc.getOwnerObject(ctx, owner)
+// if errors.IsNotFound(err) {
+// return nil
+// }
+// if err != nil {
+// return fmt.Errorf("cannot finalize owner %s because it cannot be retrieved: %v. The garbage collector will retry later", &owner, err)
+// }
+// accessor, err := meta.Accessor(ownerObject)
+// if err != nil {
+// return fmt.Errorf("cannot access the owner object %v: %v. The garbage collector will retry later", ownerObject, err)
+// }
+// finalizers := accessor.GetFinalizers()
+// for _, f := range finalizers {
+// if f == targetFinalizer {
+// // logger.V(5).Info("finalizer already added to object", "finalizer", targetFinalizer, "object", owner)
+// return nil
+// }
+// }
+
+// // add the finalizer
+// finalizers = append(finalizers, targetFinalizer)
+// patch, err := json.Marshal(&objectForFinalizersPatch{
+// ObjectMetaForFinalizersPatch: ObjectMetaForFinalizersPatch{
+// ResourceVersion: accessor.GetResourceVersion(),
+// Finalizers: finalizers,
+// },
+// })
+// if err != nil {
+// return fmt.Errorf("unable to finalize %s due to an error serializing patch: %v", owner, err)
+// }
+// _, err = gc.patchOwnerObject(ctx, owner, patch, types.MergePatchType)
+// return err
+// })
+// if errors.IsConflict(err) {
+// return fmt.Errorf("the maximum number of retries (%d) has been reached. The garbage collector will retry later for owner %v", retry.DefaultBackoff.Steps, owner)
+// }
+// return err
+// }
+
+// type objectForFinalizersPatch struct {
+// ObjectMetaForFinalizersPatch `json:"metadata"`
+// }
+
+// // ObjectMetaForFinalizersPatch defines object meta struct for finalizers patch operation.
+// type ObjectMetaForFinalizersPatch struct {
+// ResourceVersion string `json:"resourceVersion"`
+// Finalizers []string `json:"finalizers"`
+// }
+
+type objectForOwnerRefsPatch struct {
+ ObjectMetaForOwnerRefsPatch `json:"metadata"`
+}
+
+// ObjectMetaForOwnerRefsPatch defines the object meta struct for an owner references patch operation.
+type ObjectMetaForOwnerRefsPatch struct {
+ ResourceVersion string `json:"resourceVersion"`
+ OwnerReferences []metav1.OwnerReference `json:"ownerReferences"`
+}
+
+type objectForDeleteOwnerRefStrategicMergePatch struct {
+ ObjectMetaForOwnerRefsStrategicMergePatch `json:"metadata"`
+}
+
+// ObjectMetaForOwnerRefsStrategicMergePatch defines the object meta struct for a strategic merge patch that deletes owner references.
+type ObjectMetaForOwnerRefsStrategicMergePatch struct {
+ UID types.UID `json:"uid"`
+ OwnerReferences []map[string]string `json:"ownerReferences"`
+}
+
+// generateDeleteOwnerRefStrategicMergeBytes returns a strategic merge patch that removes the owner reference matching ownerUID.
+func generateDeleteOwnerRefStrategicMergeBytes(uid types.UID, ownerUID types.UID) ([]byte, error) {
+ ownerReferences := []map[string]string{
+ {
+ "$patch": "delete",
+ "uid": string(ownerUID),
+ },
+ }
+ patch := &objectForDeleteOwnerRefStrategicMergePatch{
+ ObjectMetaForOwnerRefsStrategicMergePatch: ObjectMetaForOwnerRefsStrategicMergePatch{
+ UID: uid,
+ OwnerReferences: ownerReferences,
+ },
+ }
+ return json.Marshal(patch)
+}
+
+// generateDeleteOwnerRefJSONMergePatch returns a JSON merge patch that removes the owner reference matching ownerUID.
+func generateDeleteOwnerRefJSONMergePatch(obj metav1.Object, ownerUID types.UID) ([]byte, error) {
+ expectedObjectMeta := objectForOwnerRefsPatch{}
+ expectedObjectMeta.ResourceVersion = obj.GetResourceVersion()
+ refs := obj.GetOwnerReferences()
+ for _, ref := range refs {
+ if ref.UID != ownerUID {
+ expectedObjectMeta.OwnerReferences = append(expectedObjectMeta.OwnerReferences, ref)
+ }
+ }
+ return json.Marshal(expectedObjectMeta)
+}
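+
+// For illustration only (hypothetical values, not produced by any test in
+// this package): removing owner "owner-1" from a manifestwork with UID
+// "work-uid" yields the strategic merge patch:
+//
+//   {"metadata":{"uid":"work-uid","ownerReferences":[{"$patch":"delete","uid":"owner-1"}]}}
+//
+// while the JSON merge patch fallback writes the retained references back
+// wholesale:
+//
+//   {"metadata":{"resourceVersion":"42","ownerReferences":[/* every ref except owner-1 */]}}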