From c22484d6cb954afaddcc7c5bf9d2d612162c9f8f Mon Sep 17 00:00:00 2001 From: Matthew Heon Date: Wed, 27 Sep 2023 14:12:30 -0400 Subject: [PATCH] Add support for volume sharing on gvproxy for Windows Specifically, gvproxy will act as a 9p server, serving shares requested by the caller on Hyper-V vsocks, to allow the guest VM to access content on the host. The vsocks are intended to be managed by the caller in this model. As such, gvproxy receives the path to be shared and a vsock GUID to share it on via CLI. An arbitrary number of these are accepted, as each share needs a separate server and vsock (they will be mounted by the Linux kernel 9p code within the guest VM, which does not support multiplexing multiple shares over a single vsock). Also, make a slight change to .gitignore - the ./bin/ ignore pattern was not ignoring the bin/ directory for me (removing the leading . fixes things). Signed-off-by: Matthew Heon --- .gitignore | 2 +- cmd/gvproxy/main.go | 23 + go.mod | 9 +- go.sum | 79 +- pkg/fileserver/plan9/serve.go | 95 + pkg/fileserver/server_unsupported.go | 12 + pkg/fileserver/server_windows.go | 61 + vendor/github.com/hugelgupf/p9/LICENSE | 202 ++ .../hugelgupf/p9/fsimpl/localfs/localfs.go | 281 ++ .../hugelgupf/p9/fsimpl/localfs/readdir.go | 50 + .../p9/fsimpl/localfs/system_unix.go | 40 + .../p9/fsimpl/localfs/system_windows.go | 50 + .../p9/fsimpl/localfs/system_xattr.go | 25 + .../p9/fsimpl/templatefs/readonly.go | 194 ++ .../p9/fsimpl/templatefs/unimplfs.go | 193 ++ .../hugelgupf/p9/fsimpl/xattr/xattr.go | 39 + .../p9/fsimpl/xattr/xattr_windows.go | 11 + .../github.com/hugelgupf/p9/internal/doc.go | 2 + .../hugelgupf/p9/internal/stat_bsd.go | 31 + .../hugelgupf/p9/internal/stat_openbsd.go | 28 + .../hugelgupf/p9/internal/stat_standard.go | 31 + .../hugelgupf/p9/internal/stat_unix.go | 11 + .../hugelgupf/p9/internal/stat_windows.go | 74 + vendor/github.com/hugelgupf/p9/linux/errno.go | 291 ++ .../github.com/hugelgupf/p9/linux/errors.go | 38 + 
.../hugelgupf/p9/linux/errors_linux.go | 17 + .../hugelgupf/p9/linux/errors_unix.go | 21 + .../hugelgupf/p9/linux/errors_windows.go | 28 + vendor/github.com/hugelgupf/p9/p9/buffer.go | 253 ++ vendor/github.com/hugelgupf/p9/p9/client.go | 344 +++ .../github.com/hugelgupf/p9/p9/client_file.go | 568 ++++ vendor/github.com/hugelgupf/p9/p9/file.go | 274 ++ vendor/github.com/hugelgupf/p9/p9/fuzz.go | 27 + vendor/github.com/hugelgupf/p9/p9/handlers.go | 1390 ++++++++++ vendor/github.com/hugelgupf/p9/p9/messages.go | 2348 +++++++++++++++++ vendor/github.com/hugelgupf/p9/p9/p9.go | 1167 ++++++++ .../github.com/hugelgupf/p9/p9/path_tree.go | 238 ++ vendor/github.com/hugelgupf/p9/p9/pool.go | 65 + vendor/github.com/hugelgupf/p9/p9/server.go | 681 +++++ .../github.com/hugelgupf/p9/p9/transport.go | 245 ++ vendor/github.com/hugelgupf/p9/p9/version.go | 134 + .../hugelgupf/p9/vecnet/iov32_linux.go | 22 + .../hugelgupf/p9/vecnet/iov_linux.go | 22 + .../github.com/hugelgupf/p9/vecnet/vecnet.go | 54 + .../hugelgupf/p9/vecnet/vecnet_linux.go | 112 + .../hugelgupf/p9/vecnet/vecnet_other.go | 24 + .../insomniacslk/dhcp/dhcpv4/dhcpv4.go | 30 +- .../insomniacslk/dhcp/dhcpv4/fuzz.go | 41 - .../insomniacslk/dhcp/dhcpv4/modifiers.go | 7 + .../insomniacslk/dhcp/dhcpv4/options.go | 5 + .../dhcp/dhcpv4/server4/conn_unix.go | 4 + .../insomniacslk/dhcp/iana/entid.go | 10 +- .../insomniacslk/dhcp/rfc1035label/label.go | 12 +- vendor/github.com/josharian/native/doc.go | 8 + .../github.com/josharian/native/endian_big.go | 14 + .../josharian/native/endian_generic.go | 31 + .../josharian/native/endian_little.go | 14 + vendor/github.com/josharian/native/license | 7 + vendor/github.com/josharian/native/readme.md | 10 + vendor/github.com/pierrec/lz4/v4/.gitignore | 36 + vendor/github.com/pierrec/lz4/v4/LICENSE | 28 + vendor/github.com/pierrec/lz4/v4/README.md | 92 + .../pierrec/lz4/v4/internal/lz4block/block.go | 481 ++++ .../lz4/v4/internal/lz4block/blocks.go | 90 + 
.../lz4/v4/internal/lz4block/decode_amd64.s | 448 ++++ .../lz4/v4/internal/lz4block/decode_arm.s | 231 ++ .../lz4/v4/internal/lz4block/decode_arm64.s | 241 ++ .../lz4/v4/internal/lz4block/decode_asm.go | 10 + .../lz4/v4/internal/lz4block/decode_other.go | 139 + .../lz4/v4/internal/lz4errors/errors.go | 19 + .../lz4/v4/internal/lz4stream/block.go | 350 +++ .../lz4/v4/internal/lz4stream/frame.go | 204 ++ .../lz4/v4/internal/lz4stream/frame_gen.go | 103 + .../lz4/v4/internal/xxh32/xxh32zero.go | 212 ++ .../lz4/v4/internal/xxh32/xxh32zero_arm.go | 11 + .../lz4/v4/internal/xxh32/xxh32zero_arm.s | 251 ++ .../lz4/v4/internal/xxh32/xxh32zero_other.go | 10 + vendor/github.com/pierrec/lz4/v4/lz4.go | 157 ++ vendor/github.com/pierrec/lz4/v4/options.go | 214 ++ .../github.com/pierrec/lz4/v4/options_gen.go | 92 + vendor/github.com/pierrec/lz4/v4/reader.go | 275 ++ vendor/github.com/pierrec/lz4/v4/state.go | 75 + vendor/github.com/pierrec/lz4/v4/state_gen.go | 28 + vendor/github.com/pierrec/lz4/v4/writer.go | 238 ++ vendor/github.com/u-root/uio/rand/random.go | 2 +- .../u-root/uio/rand/random_linux.go | 4 +- .../u-root/uio/rand/random_urandom.go | 5 +- .../u-root/uio/ubinary/big_endian.go | 14 - vendor/github.com/u-root/uio/ubinary/doc.go | 6 - .../u-root/uio/ubinary/little_endian.go | 14 - .../u-root/uio/uio/archivereader.go | 85 + vendor/github.com/u-root/uio/uio/buffer.go | 43 +- vendor/github.com/u-root/uio/uio/null.go | 2 +- vendor/github.com/u-root/uio/uio/progress.go | 4 +- vendor/github.com/u-root/uio/uio/reader.go | 23 +- vendor/github.com/u-root/uio/ulog/log.go | 31 + .../vishvananda/netns/.golangci.yml | 2 + vendor/github.com/vishvananda/netns/README.md | 12 +- vendor/github.com/vishvananda/netns/doc.go | 9 + .../vishvananda/netns/netns_linux.go | 52 +- .../{netns_unspecified.go => netns_others.go} | 17 + .../netns/{netns.go => nshandle_linux.go} | 12 +- .../vishvananda/netns/nshandle_others.go | 45 + vendor/modules.txt | 33 +- 104 files changed, 14336 insertions(+), 
213 deletions(-) create mode 100644 pkg/fileserver/plan9/serve.go create mode 100644 pkg/fileserver/server_unsupported.go create mode 100644 pkg/fileserver/server_windows.go create mode 100644 vendor/github.com/hugelgupf/p9/LICENSE create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/localfs/localfs.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/localfs/readdir.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_unix.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_windows.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_xattr.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/templatefs/readonly.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/templatefs/unimplfs.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr.go create mode 100644 vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr_windows.go create mode 100644 vendor/github.com/hugelgupf/p9/internal/doc.go create mode 100644 vendor/github.com/hugelgupf/p9/internal/stat_bsd.go create mode 100644 vendor/github.com/hugelgupf/p9/internal/stat_openbsd.go create mode 100644 vendor/github.com/hugelgupf/p9/internal/stat_standard.go create mode 100644 vendor/github.com/hugelgupf/p9/internal/stat_unix.go create mode 100644 vendor/github.com/hugelgupf/p9/internal/stat_windows.go create mode 100644 vendor/github.com/hugelgupf/p9/linux/errno.go create mode 100644 vendor/github.com/hugelgupf/p9/linux/errors.go create mode 100644 vendor/github.com/hugelgupf/p9/linux/errors_linux.go create mode 100644 vendor/github.com/hugelgupf/p9/linux/errors_unix.go create mode 100644 vendor/github.com/hugelgupf/p9/linux/errors_windows.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/buffer.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/client.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/client_file.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/file.go create mode 100644 
vendor/github.com/hugelgupf/p9/p9/fuzz.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/handlers.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/messages.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/p9.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/path_tree.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/pool.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/server.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/transport.go create mode 100644 vendor/github.com/hugelgupf/p9/p9/version.go create mode 100644 vendor/github.com/hugelgupf/p9/vecnet/iov32_linux.go create mode 100644 vendor/github.com/hugelgupf/p9/vecnet/iov_linux.go create mode 100644 vendor/github.com/hugelgupf/p9/vecnet/vecnet.go create mode 100644 vendor/github.com/hugelgupf/p9/vecnet/vecnet_linux.go create mode 100644 vendor/github.com/hugelgupf/p9/vecnet/vecnet_other.go delete mode 100644 vendor/github.com/insomniacslk/dhcp/dhcpv4/fuzz.go create mode 100644 vendor/github.com/josharian/native/doc.go create mode 100644 vendor/github.com/josharian/native/endian_big.go create mode 100644 vendor/github.com/josharian/native/endian_generic.go create mode 100644 vendor/github.com/josharian/native/endian_little.go create mode 100644 vendor/github.com/josharian/native/license create mode 100644 vendor/github.com/josharian/native/readme.md create mode 100644 vendor/github.com/pierrec/lz4/v4/.gitignore create mode 100644 vendor/github.com/pierrec/lz4/v4/LICENSE create mode 100644 vendor/github.com/pierrec/lz4/v4/README.md create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s create mode 100644 
vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s create mode 100644 vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go create mode 100644 vendor/github.com/pierrec/lz4/v4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/v4/options.go create mode 100644 vendor/github.com/pierrec/lz4/v4/options_gen.go create mode 100644 vendor/github.com/pierrec/lz4/v4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/v4/state.go create mode 100644 vendor/github.com/pierrec/lz4/v4/state_gen.go create mode 100644 vendor/github.com/pierrec/lz4/v4/writer.go delete mode 100644 vendor/github.com/u-root/uio/ubinary/big_endian.go delete mode 100644 vendor/github.com/u-root/uio/ubinary/doc.go delete mode 100644 vendor/github.com/u-root/uio/ubinary/little_endian.go create mode 100644 vendor/github.com/u-root/uio/uio/archivereader.go create mode 100644 vendor/github.com/u-root/uio/ulog/log.go create mode 100644 vendor/github.com/vishvananda/netns/.golangci.yml create mode 100644 vendor/github.com/vishvananda/netns/doc.go rename vendor/github.com/vishvananda/netns/{netns_unspecified.go => netns_others.go} (63%) rename vendor/github.com/vishvananda/netns/{netns.go => nshandle_linux.go} (75%) create mode 100644 vendor/github.com/vishvananda/netns/nshandle_others.go diff --git a/.gitignore b/.gitignore 
index 49bac5ba7..753ea2bcc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -./bin/ +/bin/ capture.pcap tmp/ test/qcon.log diff --git a/cmd/gvproxy/main.go b/cmd/gvproxy/main.go index 8e6977513..52ae8641d 100644 --- a/cmd/gvproxy/main.go +++ b/cmd/gvproxy/main.go @@ -17,6 +17,7 @@ import ( "syscall" "time" + "github.com/containers/gvisor-tap-vsock/pkg/fileserver" "github.com/containers/gvisor-tap-vsock/pkg/net/stdio" "github.com/containers/gvisor-tap-vsock/pkg/sshclient" "github.com/containers/gvisor-tap-vsock/pkg/transport" @@ -44,6 +45,8 @@ var ( forwardIdentify arrayFlags sshPort int pidFile string + shareVolumes arrayFlags + hvsockShares map[string]string exitCode int ) @@ -70,6 +73,7 @@ func main() { flag.Var(&forwardUser, "forward-user", "SSH user to use for unix socket forward") flag.Var(&forwardIdentify, "forward-identity", "Path to SSH identity key for forwarding") flag.StringVar(&pidFile, "pid-file", "", "Generate a file with the PID in it") + flag.Var(&shareVolumes, "share-volume", "Share a volume to the guest virtual machine over 9p") flag.Parse() ctx, cancel := context.WithCancel(context.Background()) @@ -161,6 +165,20 @@ func main() { } } + // Verify syntax of requested volume shares + hvsockShares := make(map[string]string, len(shareVolumes)) + for i := 0; i < len(shareVolumes); i++ { + splitPath := strings.Split(shareVolumes[i], ":") + + if len(splitPath) < 2 { + exitWithError(errors.New("Share paths passed to --share-volume must include a vsock guid")) + } + + path := strings.Join(splitPath[:len(splitPath)-1], ":") + + hvsockShares[path] = splitPath[len(splitPath)-1] + } + // Create a PID file if requested if len(pidFile) > 0 { f, err := os.Create(pidFile) @@ -179,6 +197,11 @@ func main() { } } + // Start shares + if err := fileserver.StartShares(hvsockShares); err != nil { + exitWithError(err) + } + config := types.Configuration{ Debug: debug, CaptureFile: captureFile(), diff --git a/go.mod b/go.mod index 5cfedd976..040c47b41 100644 --- 
a/go.mod +++ b/go.mod @@ -9,7 +9,8 @@ require ( github.com/coreos/stream-metadata-go v0.4.3 github.com/dustin/go-humanize v1.0.1 github.com/google/gopacket v1.1.19 - github.com/insomniacslk/dhcp v0.0.0-20220504074936-1ca156eafb9f + github.com/hugelgupf/p9 v0.3.1-0.20230822151754-54f5c5530921 + github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 github.com/mdlayher/vsock v1.2.1 github.com/miekg/dns v1.1.56 @@ -34,11 +35,13 @@ require ( github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/josharian/native v1.1.0 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/nxadm/tail v1.4.8 // indirect + github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/u-root/uio v0.0.0-20210528114334-82958018845c // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect + github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect + github.com/vishvananda/netns v0.0.4 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.15.0 // indirect golang.org/x/text v0.13.0 // indirect diff --git a/go.sum b/go.sum index 7dff2fe39..9223b52ef 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= @@ -7,12 +8,12 @@ 
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8= github.com/coreos/stream-metadata-go v0.4.3 h1:5GykJ8dtZSx1rdlzEAiDVzA73cwmUF3ceTuIP293L6E= github.com/coreos/stream-metadata-go v0.4.3/go.mod h1:fMObQqQm8Ku91G04btKzEH3AsdP1mrAb986z9aaK0tE= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -29,37 +30,33 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp 
v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= -github.com/insomniacslk/dhcp v0.0.0-20220504074936-1ca156eafb9f h1:l1QCwn715k8nYkj4Ql50rzEog3WnMdrd4YYMMwemxEo= -github.com/insomniacslk/dhcp v0.0.0-20220504074936-1ca156eafb9f/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= -github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/hugelgupf/p9 
v0.3.1-0.20230822151754-54f5c5530921 h1:cfYGdNpXGZobTSSDFB+wx2FRfWptM7sCkScJgVx0Tkk= +github.com/hugelgupf/p9 v0.3.1-0.20230822151754-54f5c5530921/go.mod h1:nMr69J6AmirlSvzeVLK7gj4DUY1oYtSwcSiSJ7BBb0A= +github.com/hugelgupf/socketpair v0.0.0-20230822150718-707395b1939a h1:Nq7wDsqsVBUBfGn8yB1M028ShWTKTtZBcafaTJ35N0s= +github.com/hugelgupf/vmtest v0.0.0-20230810222836-f8c8e381617c h1:4A+BVHylCBQPxlW1NrUITDpRAHCeX6QSZHmzzFQqliU= +github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c h1:P/3mFnHCv1A/ej4m8pF5EB6FUt9qEL2Q9lfrcUNwCYs= +github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4= +github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 h1:DZMFueDbfz6PNc1GwDRA8+6lBx1TB9UnxDQliCqR73Y= github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2/go.mod h1:SWzULI85WerrFt3u+nIm5F9l7EvxZTKQvd0InF3nmgM= -github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7 h1:lez6TS6aAau+8wXUP3G9I3TGlmPFEq2CTxBaRqY6AGE= -github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= 
-github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065 h1:aFkJ6lx4FPip+S+Uw4aTegFMct9shDvP+79PsSxpm3w= -github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= @@ -80,55 +77,52 @@ github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/songgao/packets v0.0.0-20160404182456-549a10cd4091 h1:1zN6ImoqhSJhN8hGXFaJlSC8msLmIbX8bFqOfWLKw0w= github.com/songgao/packets v0.0.0-20160404182456-549a10cd4091/go.mod h1:N20Z5Y8oye9a7HmytmZ+tr8Q2vlP0tAHP13kTHzwvQY= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/u-root/uio v0.0.0-20210528114334-82958018845c h1:BFvcl34IGnw8yvJi8hlqLFo9EshRInwWBs2M5fGWzQA= -github.com/u-root/uio v0.0.0-20210528114334-82958018845c/go.mod h1:LpEX5FO/cB+WF4TYGY1V5qktpaZLkKkSegbr0V4eYXA= +github.com/u-root/gobusybox/src v0.0.0-20230806212452-e9366a5b9fdc h1:udgfN9Qy573qgHWMEORFgy6YXNDiN/Fd5LlKdlp+/Mo= +github.com/u-root/u-root v0.11.1-0.20230807200058-f87ad7ccb594 h1:1AIJqOtdEufYfGb3eRpdaqWONzBOpAwrg1fehbWg+Mg= +github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg= +github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/vishvananda/netlink v1.2.1-beta.2 
h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -138,25 +132,16 @@ golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606122018-79a91cf218c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -168,7 +153,6 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -200,3 +184,4 @@ gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db h1:WZSmkyu/hep9YhWIlBZefwGV gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db/go.mod h1:sQuqOkxbfJq/GS2uSnqHphtXclHyk/ZrAGhZBxxsq6g= inet.af/tcpproxy v0.0.0-20220326234310-be3ee21c9fa0 h1:PqdHrvQRVK1zapJkd0qf6+tevvSIcWdfenVqJd3PHWU= inet.af/tcpproxy v0.0.0-20220326234310-be3ee21c9fa0/go.mod h1:Tojt5kmHpDIR2jMojxzZK2w2ZR7OILODmUo2gaSwjrk= +src.elv.sh v0.16.0-rc1.0.20220116211855-fda62502ad7f h1:pjVeIo9Ba6K1Wy+rlwX91zT7A+xGEmxiNRBdN04gDTQ= diff --git a/pkg/fileserver/plan9/serve.go b/pkg/fileserver/plan9/serve.go new file mode 100644 index 000000000..903226a5e --- /dev/null +++ b/pkg/fileserver/plan9/serve.go @@ -0,0 +1,95 @@ +package plan9 + +import ( + "fmt" + "net" + "os" + "path/filepath" + + "github.com/hugelgupf/p9/fsimpl/localfs" + 
"github.com/hugelgupf/p9/p9" + "github.com/sirupsen/logrus" + "github.com/pkg/errors" +) + +type Plan9Server struct { + server *p9.Server + // TODO: Once server has a proper Close() we don't need this. + // This is basically just a short-circuit to actually close the server + // without that ability. + listener net.Listener + // Errors from the server being started will come out here. + errChan chan error +} + +// Expose a single directory (and all children) via the given net.Listener. +// Directory given must be an absolute path and must exist. +func New9pServer(listener net.Listener, exposeDir string) (*Plan9Server, error) { + // Verify that exposeDir makes sense. + if !filepath.IsAbs(exposeDir) { + return nil, fmt.Errorf("path to expose to machine must be absolute: %s", exposeDir) + } + stat, err := os.Stat(exposeDir) + if err != nil { + return nil, errors.Wrapf(err, "cannot stat path to expose to machine") + } + if !stat.IsDir() { + return nil, fmt.Errorf("path to expose to machine must be a directory: %s", exposeDir) + } + + server := p9.NewServer(localfs.Attacher(exposeDir), []p9.ServerOpt{}...) + if server == nil { + return nil, fmt.Errorf("p9.NewServer returned nil") + } + + errChan := make(chan error) + + // TODO: Use a channel to pass back this if it occurs within a + // reasonable timeframe. + go func() { + errChan <- server.Serve(listener) + close(errChan) + }() + + toReturn := new(Plan9Server) + toReturn.listener = listener + toReturn.server = server + toReturn.errChan = errChan + + // Just before returning, check to see if we got an error off server + // startup. + select { + case err := <-errChan: + return nil, errors.Wrapf(err, "starting 9p server") + default: + logrus.Infof("Successfully started 9p server for directory %s", exposeDir) + } + + return toReturn, nil +} + +// Stop a running server. +// Please note that this does *BAD THINGS* to clients if they are still running +// when the server stops. 
Processes get stuck in I/O deep sleep and zombify, and +// nothing I do save restarting the VM can remove the zombies. +func (s *Plan9Server) Stop() error { + if s.server != nil { + if err := s.listener.Close(); err != nil { + return err + } + s.server = nil + } + + return nil +} + +// Wait for an error from a running server. +func (s *Plan9Server) WaitForError() error { + if s.server != nil { + err := <-s.errChan + return err + } + + // Server already down, return nil + return nil +} diff --git a/pkg/fileserver/server_unsupported.go b/pkg/fileserver/server_unsupported.go new file mode 100644 index 000000000..cfa755f41 --- /dev/null +++ b/pkg/fileserver/server_unsupported.go @@ -0,0 +1,12 @@ +//go:build !windows +// +build !windows + +package fileserver + +import ( + "fmt" +) + +func StartShares(mounts map[string]string) error { + return fmt.Errorf("this platform does not support sharing directories") +} diff --git a/pkg/fileserver/server_windows.go b/pkg/fileserver/server_windows.go new file mode 100644 index 000000000..68577fa0b --- /dev/null +++ b/pkg/fileserver/server_windows.go @@ -0,0 +1,61 @@ +package fileserver + +import ( + "github.com/containers/gvisor-tap-vsock/pkg/fileserver/plan9" + "github.com/linuxkit/virtsock/pkg/hvsock" + "github.com/sirupsen/logrus" + "github.com/pkg/errors" +) + +// Start serving the given shares on Windows HVSocks for use by a Hyper-V VM. +// Mounts is formatted as a map of directory to be shared to vsock GUID. +// The vsocks used must already be defined before StartShares is called; it's +// expected that the vsocks will be created and torn down by the program calling +// gvproxy. +// TODO: The map here probably doesn't make sense. +// In the future, possibly accept a struct instead, so we can accept things +// other than a vsock and support non-Windows OSes. 
+func StartShares(mounts map[string]string) (defErr error) { + for path, guid := range mounts { + service, err := hvsock.GUIDFromString(guid) + if err != nil { + return errors.Wrapf(err, "parsing vsock guid %s: %w", guid) + } + + listener, err := hvsock.Listen(hvsock.Addr{ + VMID: hvsock.GUIDWildcard, + ServiceID: service, + }) + if err != nil { + return errors.Wrapf(err, "retrieving listener for vsock %s: %w", guid) + } + + logrus.Debugf("Going to serve directory %s on vsock %s", path, guid) + + server, err := plan9.New9pServer(listener, path) + if err != nil { + return errors.Wrapf(err, "serving directory %s on vsock %s: %w", path, guid) + } + defer func() { + if defErr != nil { + if err := server.Stop(); err != nil { + logrus.Errorf("Error stopping 9p server: %v", err) + } + } + }() + + serverDir := path + + go func() { + if err := server.WaitForError(); err != nil { + logrus.Errorf("Error from 9p server for %s: %v", path, err) + } else { + // We do not expect server exits - this should + // run until the program exits. + logrus.Warnf("9p server for %s exited without error", serverDir) + } + }() + } + + return nil +} diff --git a/vendor/github.com/hugelgupf/p9/LICENSE b/vendor/github.com/hugelgupf/p9/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/localfs/localfs.go b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/localfs.go new file mode 100644 index 000000000..c8ef1b779 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/localfs.go @@ -0,0 +1,281 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package localfs exposes the host's local file system as a p9.File. +package localfs + +import ( + "os" + "path" + + "github.com/hugelgupf/p9/fsimpl/templatefs" + "github.com/hugelgupf/p9/internal" + "github.com/hugelgupf/p9/linux" + "github.com/hugelgupf/p9/p9" +) + +type attacher struct { + root string +} + +var ( + _ p9.Attacher = &attacher{} +) + +// RootAttacher attaches at the host file system's root. 
+func RootAttacher() p9.Attacher { + return &attacher{root: "/"} +} + +// Attacher returns an attacher that exposes files under root. +func Attacher(root string) p9.Attacher { + if len(root) == 0 { + root = "/" + } + return &attacher{root: root} +} + +// Attach implements p9.Attacher.Attach. +func (a *attacher) Attach() (p9.File, error) { + umask(0) + return &Local{path: a.root}, nil +} + +// Local is a p9.File. +type Local struct { + p9.DefaultWalkGetAttr + templatefs.NoopFile + + path string + file *os.File +} + +var ( + _ p9.File = &Local{} +) + +// info constructs a QID for this file. +func (l *Local) info() (p9.QID, os.FileInfo, error) { + var ( + qid p9.QID + fi os.FileInfo + err error + ) + + // Stat the file. + if l.file != nil { + fi, err = l.file.Stat() + } else { + fi, err = os.Lstat(l.path) + } + if err != nil { + return qid, nil, err + } + + // Construct the QID type. + qid.Type = p9.ModeFromOS(fi.Mode()).QIDType() + + // Save the path from the Ino. + ninePath, err := localToQid(l.path, fi) + if err != nil { + return qid, nil, err + } + + qid.Path = ninePath + + return qid, fi, nil +} + +// Walk implements p9.File.Walk. +func (l *Local) Walk(names []string) ([]p9.QID, p9.File, error) { + var qids []p9.QID + last := &Local{path: l.path} + + // A walk with no names is a copy of self. + if len(names) == 0 { + return nil, last, nil + } + + for _, name := range names { + c := &Local{path: path.Join(last.path, name)} + qid, _, err := c.info() + if err != nil { + return nil, nil, err + } + qids = append(qids, qid) + last = c + } + return qids, last, nil +} + +// FSync implements p9.File.FSync. +func (l *Local) FSync() error { + return l.file.Sync() +} + +// GetAttr implements p9.File.GetAttr. +// +// Not fully implemented. 
+func (l *Local) GetAttr(req p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) { + qid, fi, err := l.info() + if err != nil { + return qid, p9.AttrMask{}, p9.Attr{}, err + } + + stat := internal.InfoToStat(fi) + attr := &p9.Attr{ + Mode: p9.FileMode(stat.Mode), + UID: p9.UID(stat.Uid), + GID: p9.GID(stat.Gid), + NLink: p9.NLink(stat.Nlink), + RDev: p9.Dev(stat.Rdev), + Size: uint64(stat.Size), + BlockSize: uint64(stat.Blksize), + Blocks: uint64(stat.Blocks), + ATimeSeconds: uint64(stat.Atim.Sec), + ATimeNanoSeconds: uint64(stat.Atim.Nsec), + MTimeSeconds: uint64(stat.Mtim.Sec), + MTimeNanoSeconds: uint64(stat.Mtim.Nsec), + CTimeSeconds: uint64(stat.Ctim.Sec), + CTimeNanoSeconds: uint64(stat.Ctim.Nsec), + } + return qid, req, *attr, nil +} + +// Close implements p9.File.Close. +func (l *Local) Close() error { + if l.file != nil { + // We don't set l.file = nil, as Close is called by servers + // only in Clunk. Clunk should release the last (direct) + // reference to this file. + return l.file.Close() + } + return nil +} + +// Open implements p9.File.Open. +func (l *Local) Open(mode p9.OpenFlags) (p9.QID, uint32, error) { + qid, _, err := l.info() + if err != nil { + return qid, 0, err + } + + // Do the actual open. + f, err := os.OpenFile(l.path, int(mode), 0) + if err != nil { + return qid, 0, err + } + l.file = f + + return qid, 0, nil +} + +// ReadAt implements p9.File.ReadAt. +func (l *Local) ReadAt(p []byte, offset int64) (int, error) { + return l.file.ReadAt(p, offset) +} + +// Lock implements p9.File.Lock. +func (l *Local) Lock(pid int, locktype p9.LockType, flags p9.LockFlags, start, length uint64, client string) (p9.LockStatus, error) { + return l.lock(pid, locktype, flags, start, length, client) +} + +// WriteAt implements p9.File.WriteAt. +func (l *Local) WriteAt(p []byte, offset int64) (int, error) { + return l.file.WriteAt(p, offset) +} + +// Create implements p9.File.Create. 
+func (l *Local) Create(name string, mode p9.OpenFlags, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.File, p9.QID, uint32, error) { + newName := path.Join(l.path, name) + f, err := os.OpenFile(newName, int(mode)|os.O_CREATE|os.O_EXCL, os.FileMode(permissions)) + if err != nil { + return nil, p9.QID{}, 0, err + } + + l2 := &Local{path: newName, file: f} + qid, _, err := l2.info() + if err != nil { + l2.Close() + return nil, p9.QID{}, 0, err + } + return l2, qid, 0, nil +} + +// Mkdir implements p9.File.Mkdir. +// +// Not properly implemented. +func (l *Local) Mkdir(name string, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.QID, error) { + if err := os.Mkdir(path.Join(l.path, name), os.FileMode(permissions)); err != nil { + return p9.QID{}, err + } + + // Blank QID. + return p9.QID{}, nil +} + +// Symlink implements p9.File.Symlink. +// +// Not properly implemented. +func (l *Local) Symlink(oldname string, newname string, _ p9.UID, _ p9.GID) (p9.QID, error) { + if err := os.Symlink(oldname, path.Join(l.path, newname)); err != nil { + return p9.QID{}, err + } + + // Blank QID. + return p9.QID{}, nil +} + +// Link implements p9.File.Link. +// +// Not properly implemented. +func (l *Local) Link(target p9.File, newname string) error { + return os.Link(target.(*Local).path, path.Join(l.path, newname)) +} + +// RenameAt implements p9.File.RenameAt. +func (l *Local) RenameAt(oldName string, newDir p9.File, newName string) error { + oldPath := path.Join(l.path, oldName) + newPath := path.Join(newDir.(*Local).path, newName) + + return os.Rename(oldPath, newPath) +} + +// Readlink implements p9.File.Readlink. +// +// Not properly implemented. +func (l *Local) Readlink() (string, error) { + return os.Readlink(l.path) +} + +// Renamed implements p9.File.Renamed. +func (l *Local) Renamed(parent p9.File, newName string) { + l.path = path.Join(parent.(*Local).path, newName) +} + +// SetAttr implements p9.File.SetAttr. 
+func (l *Local) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error { + // When truncate(2) is called on Linux, Linux will try to set time & size. Fake it. Sorry. + supported := p9.SetAttrMask{Size: true, MTime: true, CTime: true} + if !valid.IsSubsetOf(supported) { + return linux.ENOSYS + } + + if valid.Size { + // If more than one thing is ever implemented, we can't just + // return an error here. + return os.Truncate(l.path, int64(attr.Size)) + } + return nil +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/localfs/readdir.go b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/readdir.go new file mode 100644 index 000000000..6bb0edf7e --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/readdir.go @@ -0,0 +1,50 @@ +package localfs + +import ( + "io" + "path" + + "github.com/hugelgupf/p9/p9" +) + +// Readdir implements p9.File.Readdir. +func (l *Local) Readdir(offset uint64, count uint32) (p9.Dirents, error) { + var ( + p9Ents = make([]p9.Dirent, 0) + cursor = uint64(0) + ) + + for len(p9Ents) < int(count) { + singleEnt, err := l.file.Readdirnames(1) + + if err == io.EOF { + return p9Ents, nil + } else if err != nil { + return nil, err + } + + // we consumed an entry + cursor++ + + // cursor \in (offset, offset+count) + if cursor < offset || cursor > offset+uint64(count) { + continue + } + + name := singleEnt[0] + + localEnt := Local{path: path.Join(l.path, name)} + qid, _, err := localEnt.info() + if err != nil { + return p9Ents, err + } + p9Ents = append(p9Ents, p9.Dirent{ + QID: qid, + Type: qid.Type, + Name: name, + Offset: cursor, + }) + } + + return p9Ents, nil +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_unix.go b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_unix.go new file mode 100644 index 000000000..ad82a12d8 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_unix.go @@ -0,0 +1,40 @@ +//go:build !windows +// +build !windows + +package localfs + +import ( + "os" + "syscall" + + 
"github.com/hugelgupf/p9/p9" + "golang.org/x/sys/unix" +) + +func umask(mask int) int { + return syscall.Umask(mask) +} + +func localToQid(_ string, fi os.FileInfo) (uint64, error) { + return uint64(fi.Sys().(*syscall.Stat_t).Ino), nil +} + +// lock implements p9.File.Lock. +func (l *Local) lock(pid int, locktype p9.LockType, flags p9.LockFlags, start, length uint64, client string) (p9.LockStatus, error) { + switch locktype { + case p9.ReadLock, p9.WriteLock: + if err := unix.Flock(int(l.file.Fd()), unix.LOCK_EX); err != nil { + return p9.LockStatusError, nil + } + + case p9.Unlock: + if err := unix.Flock(int(l.file.Fd()), unix.LOCK_EX); err != nil { + return p9.LockStatusError, nil + } + + default: + return p9.LockStatusOK, unix.ENOSYS + } + + return p9.LockStatusOK, nil +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_windows.go b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_windows.go new file mode 100644 index 000000000..e845b2e0a --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_windows.go @@ -0,0 +1,50 @@ +package localfs + +import ( + "os" + + "github.com/hugelgupf/p9/linux" + "github.com/hugelgupf/p9/p9" + "golang.org/x/sys/windows" +) + +func umask(_ int) int { + return 0 +} + +func localToQid(path string, info os.FileInfo) (uint64, error) { + pathPtr, err := windows.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + + var ( + access uint32 // none; we only need metadata + sharemode uint32 + createmode uint32 = windows.OPEN_EXISTING + attribute uint32 = windows.FILE_ATTRIBUTE_NORMAL + ) + if info.IsDir() { + attribute = windows.FILE_FLAG_BACKUP_SEMANTICS + } + fd, err := windows.CreateFile(pathPtr, access, sharemode, nil, createmode, attribute, 0) + if err != nil { + return 0, err + } + + fi := &windows.ByHandleFileInformation{} + if err = windows.GetFileInformationByHandle(fd, fi); err != nil { + return 0, err + } + + x := uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow) + return x, 
nil +} + +// lock implements p9.File.Lock. +// As in FreeBSD NFS locking, we just say "sure, we did it" without actually +// doing anything; this lock design makes even less sense on Windows than +// it does on Linux (pid? really? what were they thinking?) +func (l *Local) lock(pid int, locktype p9.LockType, flags p9.LockFlags, start, length uint64, client string) (p9.LockStatus, error) { + return p9.LockStatusOK, linux.ENOSYS +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_xattr.go b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_xattr.go new file mode 100644 index 000000000..00057bd9c --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/localfs/system_xattr.go @@ -0,0 +1,25 @@ +//go:build unix && !solaris && !openbsd + +package localfs + +import ( + "github.com/hugelgupf/p9/fsimpl/xattr" + "github.com/hugelgupf/p9/p9" + "golang.org/x/sys/unix" +) + +func (l *Local) SetXattr(attr string, data []byte, flags p9.XattrFlags) error { + return unix.Setxattr(l.path, attr, data, int(flags)) +} + +func (l *Local) ListXattrs() ([]string, error) { + return xattr.List(l.path) +} + +func (l *Local) GetXattr(attr string) ([]byte, error) { + return xattr.Get(l.path, attr) +} + +func (l *Local) RemoveXattr(attr string) error { + return unix.Removexattr(l.path, attr) +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/templatefs/readonly.go b/vendor/github.com/hugelgupf/p9/fsimpl/templatefs/readonly.go new file mode 100644 index 000000000..b9223e39d --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/templatefs/readonly.go @@ -0,0 +1,194 @@ +package templatefs + +import ( + "github.com/hugelgupf/p9/linux" + "github.com/hugelgupf/p9/p9" +) + +// NotSymlinkFile denies Readlink with EINVAL. +// +// EINVAL is returned by readlink(2) when the file is not a symlink. +type NotSymlinkFile struct{} + +// Readlink implements p9.File.Readlink. 
+func (NotSymlinkFile) Readlink() (string, error) { + return "", linux.EINVAL +} + +// NotDirectoryFile denies any directory operations with ENOTDIR. +// +// Those operations are Create, Mkdir, Symlink, Link, Mknod, RenameAt, +// UnlinkAt, and Readdir. +type NotDirectoryFile struct{} + +// Create implements p9.File.Create. +func (NotDirectoryFile) Create(name string, mode p9.OpenFlags, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.File, p9.QID, uint32, error) { + return nil, p9.QID{}, 0, linux.ENOTDIR +} + +// Mkdir implements p9.File.Mkdir. +func (NotDirectoryFile) Mkdir(name string, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.ENOTDIR +} + +// Symlink implements p9.File.Symlink. +func (NotDirectoryFile) Symlink(oldname string, newname string, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.ENOTDIR +} + +// Link implements p9.File.Link. +func (NotDirectoryFile) Link(target p9.File, newname string) error { + return linux.ENOTDIR +} + +// Mknod implements p9.File.Mknod. +func (NotDirectoryFile) Mknod(name string, mode p9.FileMode, major uint32, minor uint32, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.ENOTDIR +} + +// RenameAt implements p9.File.RenameAt. +func (NotDirectoryFile) RenameAt(oldname string, newdir p9.File, newname string) error { + return linux.ENOTDIR +} + +// UnlinkAt implements p9.File.UnlinkAt. +func (NotDirectoryFile) UnlinkAt(name string, flags uint32) error { + return linux.ENOTDIR +} + +// Readdir implements p9.File.Readdir. +func (NotDirectoryFile) Readdir(offset uint64, count uint32) (p9.Dirents, error) { + return nil, linux.ENOTDIR +} + +// ReadOnlyFile returns default denials for all methods except Open, ReadAt, +// Walk, Close, and GetAttr. +// +// Returns EROFS for most modifying operations, ENOTDIR for file creation ops +// or readdir, EINVAL for readlink, xattr and lock operations return ENOSYS. +// +// Does nothing for Renamed. 
+type ReadOnlyFile struct { + NotSymlinkFile + NotDirectoryFile + XattrUnimplemented + NoopRenamed + NotLockable +} + +// FSync implements p9.File.FSync. +func (ReadOnlyFile) FSync() error { + return linux.EROFS +} + +// SetAttr implements p9.File.SetAttr. +func (ReadOnlyFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error { + return linux.EROFS +} + +// Remove implements p9.File.Remove. +func (ReadOnlyFile) Remove() error { + return linux.EROFS +} + +// Rename implements p9.File.Rename. +func (ReadOnlyFile) Rename(directory p9.File, name string) error { + return linux.EROFS +} + +// WriteAt implements p9.File.WriteAt. +func (ReadOnlyFile) WriteAt(p []byte, offset int64) (int, error) { + return 0, linux.EROFS +} + +// Flush implements p9.File.Flush. +func (ReadOnlyFile) Flush() error { + return nil +} + +// ReadOnlyDir implements default denials for all methods except Walk, Open, +// GetAttr, Readdir, Close. +// +// Creation operations return EROFS. Read/write operations return EISDIR. +// EINVAL for readlink. Renaming does nothing by default, xattr and locking are +// unimplemented. +type ReadOnlyDir struct { + NotSymlinkFile + IsDir + XattrUnimplemented + NoopRenamed + NotLockable +} + +// Create implements p9.File.Create. +func (ReadOnlyDir) Create(name string, mode p9.OpenFlags, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.File, p9.QID, uint32, error) { + return nil, p9.QID{}, 0, linux.EROFS +} + +// Mkdir implements p9.File.Mkdir. +func (ReadOnlyDir) Mkdir(name string, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.EROFS +} + +// Symlink implements p9.File.Symlink. +func (ReadOnlyDir) Symlink(oldname string, newname string, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.EROFS +} + +// Link implements p9.File.Link. +func (ReadOnlyDir) Link(target p9.File, newname string) error { + return linux.EROFS +} + +// Mknod implements p9.File.Mknod. 
+func (ReadOnlyDir) Mknod(name string, mode p9.FileMode, major uint32, minor uint32, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.EROFS +} + +// RenameAt implements p9.File.RenameAt. +func (ReadOnlyDir) RenameAt(oldname string, newdir p9.File, newname string) error { + return linux.EROFS +} + +// UnlinkAt implements p9.File.UnlinkAt. +func (ReadOnlyDir) UnlinkAt(name string, flags uint32) error { + return linux.EROFS +} + +// Readdir implements p9.File.Readdir. +func (ReadOnlyDir) Readdir(offset uint64, count uint32) (p9.Dirents, error) { + return nil, linux.EROFS +} + +// FSync implements p9.File.FSync. +func (ReadOnlyDir) FSync() error { + return linux.EROFS +} + +// SetAttr implements p9.File.SetAttr. +func (ReadOnlyDir) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error { + return linux.EROFS +} + +// Remove implements p9.File.Remove. +func (ReadOnlyDir) Remove() error { + return linux.EROFS +} + +// Rename implements p9.File.Rename. +func (ReadOnlyDir) Rename(directory p9.File, name string) error { + return linux.EROFS +} + +// IsDir returns EISDIR for ReadAt and WriteAt. +type IsDir struct{} + +// WriteAt implements p9.File.WriteAt. +func (IsDir) WriteAt(p []byte, offset int64) (int, error) { + return 0, linux.EISDIR +} + +// ReadAt implements p9.File.ReadAt. +func (IsDir) ReadAt(p []byte, offset int64) (int, error) { + return 0, linux.EISDIR +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/templatefs/unimplfs.go b/vendor/github.com/hugelgupf/p9/fsimpl/templatefs/unimplfs.go new file mode 100644 index 000000000..a12edf58c --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/templatefs/unimplfs.go @@ -0,0 +1,193 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package templatefs provides template p9.Files. +// +// NoopFile can be used to leave some methods unimplemented in incomplete +// p9.File implementations. +// +// NilCloser, ReadOnlyFile, NotDirectoryFile, and NotSymlinkFile can be used as +// templates for commonly implemented file types. They are careful not to +// conflict with each others' methods, so they can be embedded together. +package templatefs + +import ( + "github.com/hugelgupf/p9/linux" + "github.com/hugelgupf/p9/p9" +) + +// NilCloser returns nil for Close. +type NilCloser struct{} + +// Close implements p9.File.Close. +func (NilCloser) Close() error { + return nil +} + +// NilSyncer returns nil for FSync. +type NilSyncer struct{} + +// FSync implements p9.File.FSync. +func (NilSyncer) FSync() error { + return nil +} + +// NoopRenamed does nothing when the file is renamed. +type NoopRenamed struct{} + +// Renamed implements p9.File.Renamed. +func (NoopRenamed) Renamed(parent p9.File, newName string) {} + +// NotImplementedFile is a p9.File that returns ENOSYS for every listed method. +// +// Compatible with NoopRenamed, NilCloser, and NilSyncer. +type NotImplementedFile struct { + p9.DefaultWalkGetAttr + NotLockable + XattrUnimplemented +} + +// NoopFile is a p9.File with every method unimplemented. +type NoopFile struct { + NotImplementedFile + NilCloser + NilSyncer + NoopRenamed +} + +var ( + _ p9.File = &NoopFile{} +) + +// Walk implements p9.File.Walk. 
+func (NotImplementedFile) Walk(names []string) ([]p9.QID, p9.File, error) { + return nil, nil, linux.ENOSYS +} + +// StatFS implements p9.File.StatFS. +// +// Not implemented. +func (NotImplementedFile) StatFS() (p9.FSStat, error) { + return p9.FSStat{}, linux.ENOSYS +} + +// Open implements p9.File.Open. +func (NotImplementedFile) Open(mode p9.OpenFlags) (p9.QID, uint32, error) { + return p9.QID{}, 0, linux.ENOSYS +} + +// ReadAt implements p9.File.ReadAt. +func (NotImplementedFile) ReadAt(p []byte, offset int64) (int, error) { + return 0, linux.ENOSYS +} + +// GetAttr implements p9.File.GetAttr. +func (NotImplementedFile) GetAttr(req p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) { + return p9.QID{}, p9.AttrMask{}, p9.Attr{}, linux.ENOSYS +} + +// SetAttr implements p9.File.SetAttr. +func (NotImplementedFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error { + return linux.ENOSYS +} + +// Remove implements p9.File.Remove. +func (NotImplementedFile) Remove() error { + return linux.ENOSYS +} + +// Rename implements p9.File.Rename. +func (NotImplementedFile) Rename(directory p9.File, name string) error { + return linux.ENOSYS +} + +// WriteAt implements p9.File.WriteAt. +func (NotImplementedFile) WriteAt(p []byte, offset int64) (int, error) { + return 0, linux.ENOSYS +} + +// Create implements p9.File.Create. +func (NotImplementedFile) Create(name string, mode p9.OpenFlags, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.File, p9.QID, uint32, error) { + return nil, p9.QID{}, 0, linux.ENOSYS +} + +// Mkdir implements p9.File.Mkdir. +func (NotImplementedFile) Mkdir(name string, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.ENOSYS +} + +// Symlink implements p9.File.Symlink. +func (NotImplementedFile) Symlink(oldname string, newname string, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.ENOSYS +} + +// Link implements p9.File.Link. 
+func (NotImplementedFile) Link(target p9.File, newname string) error { + return linux.ENOSYS +} + +// Mknod implements p9.File.Mknod. +func (NotImplementedFile) Mknod(name string, mode p9.FileMode, major uint32, minor uint32, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, linux.ENOSYS +} + +// RenameAt implements p9.File.RenameAt. +func (NotImplementedFile) RenameAt(oldname string, newdir p9.File, newname string) error { + return linux.ENOSYS +} + +// UnlinkAt implements p9.File.UnlinkAt. +func (NotImplementedFile) UnlinkAt(name string, flags uint32) error { + return linux.ENOSYS +} + +// Readdir implements p9.File.Readdir. +func (NotImplementedFile) Readdir(offset uint64, count uint32) (p9.Dirents, error) { + return nil, linux.ENOSYS +} + +// Readlink implements p9.File.Readlink. +func (NotImplementedFile) Readlink() (string, error) { + return "", linux.ENOSYS +} + +// XattrUnimplemented implements Xattr methods returning ENOSYS. +type XattrUnimplemented struct{} + +// SetXattr implements p9.File.SetXattr. +func (XattrUnimplemented) SetXattr(attr string, data []byte, flags p9.XattrFlags) error { + return linux.ENOSYS +} + +// GetXattr implements p9.File.GetXattr. +func (XattrUnimplemented) GetXattr(attr string) ([]byte, error) { + return nil, linux.ENOSYS +} + +// ListXattrs implements p9.File.ListXattrs. +func (XattrUnimplemented) ListXattrs() ([]string, error) { + return nil, linux.ENOSYS +} + +// RemoveXattr implements p9.File.RemoveXattr. +func (XattrUnimplemented) RemoveXattr(attr string) error { + return linux.ENOSYS +} + +type NotLockable struct{} + +// Lock implements p9.File.Lock. 
+func (NotLockable) Lock(pid int, locktype p9.LockType, flags p9.LockFlags, start, length uint64, client string) (p9.LockStatus, error) { + return p9.LockStatusOK, linux.ENOSYS +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr.go b/vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr.go new file mode 100644 index 000000000..fadd2dd04 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr.go @@ -0,0 +1,39 @@ +//go:build unix && !openbsd && !solaris + +package xattr + +import ( + "io/fs" + "strings" + + "golang.org/x/sys/unix" +) + +func List(p string) ([]string, error) { + sz, err := unix.Listxattr(p, nil) + if err != nil { + return nil, &fs.PathError{Op: "listxattr-get-size", Path: p, Err: err} + } + + b := make([]byte, sz) + sz, err = unix.Listxattr(p, b) + if err != nil { + return nil, &fs.PathError{Op: "listxattr", Path: p, Err: err} + } + + return strings.Split(strings.Trim(string(b[:sz]), "\000"), "\000"), nil +} + +func Get(p string, attr string) ([]byte, error) { + sz, err := unix.Getxattr(p, attr, nil) + if err != nil { + return nil, &fs.PathError{Op: "getxattr-get-size", Path: p, Err: err} + } + + b := make([]byte, sz) + sz, err = unix.Getxattr(p, attr, b) + if err != nil { + return nil, &fs.PathError{Op: "getxattr", Path: p, Err: err} + } + return b[:sz], nil +} diff --git a/vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr_windows.go b/vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr_windows.go new file mode 100644 index 000000000..835e8b704 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/fsimpl/xattr/xattr_windows.go @@ -0,0 +1,11 @@ +package xattr + +import "github.com/hugelgupf/p9/linux" + +func List(p string) ([]string, error) { + return nil, linux.ENOSYS +} + +func Get(p string, attr string) ([]byte, error) { + return nil, linux.ENOSYS +} diff --git a/vendor/github.com/hugelgupf/p9/internal/doc.go b/vendor/github.com/hugelgupf/p9/internal/doc.go new file mode 100644 index 000000000..fef18691b --- /dev/null +++ 
b/vendor/github.com/hugelgupf/p9/internal/doc.go @@ -0,0 +1,2 @@ +// Package sys abstracts operating system features for p9. +package internal diff --git a/vendor/github.com/hugelgupf/p9/internal/stat_bsd.go b/vendor/github.com/hugelgupf/p9/internal/stat_bsd.go new file mode 100644 index 000000000..e0ce5c760 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/internal/stat_bsd.go @@ -0,0 +1,31 @@ +//go:build freebsd || darwin || netbsd +// +build freebsd darwin netbsd + +package internal + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// InfoToStat takes a platform native FileInfo and converts it into a 9P2000.L compatible Stat_t +func InfoToStat(fi os.FileInfo) *Stat_t { + nativeStat := fi.Sys().(*syscall.Stat_t) + return &Stat_t{ + Dev: nativeStat.Dev, + Ino: nativeStat.Ino, + Nlink: nativeStat.Nlink, + Mode: nativeStat.Mode, + Uid: nativeStat.Uid, + Gid: nativeStat.Gid, + Rdev: nativeStat.Rdev, + Size: nativeStat.Size, + Blksize: nativeStat.Blksize, + Blocks: nativeStat.Blocks, + Atim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Atimespec)), + Mtim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Mtimespec)), + Ctim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Ctimespec)), + } +} diff --git a/vendor/github.com/hugelgupf/p9/internal/stat_openbsd.go b/vendor/github.com/hugelgupf/p9/internal/stat_openbsd.go new file mode 100644 index 000000000..5549e63f0 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/internal/stat_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// InfoToStat takes a platform native FileInfo and converts it into a 9P2000.L compatible Stat_t +func InfoToStat(fi os.FileInfo) *Stat_t { + nativeStat := fi.Sys().(*syscall.Stat_t) + return &Stat_t{ + Dev: nativeStat.Dev, + Ino: nativeStat.Ino, + Nlink: nativeStat.Nlink, + Mode: nativeStat.Mode, + Uid: nativeStat.Uid, + Gid: nativeStat.Gid, + Rdev: nativeStat.Rdev, + Size: nativeStat.Size, + 
Blksize: int32(nativeStat.Blksize), + Blocks: nativeStat.Blocks, + Atim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Atim)), + Mtim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Mtim)), + Ctim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Ctim)), + } +} diff --git a/vendor/github.com/hugelgupf/p9/internal/stat_standard.go b/vendor/github.com/hugelgupf/p9/internal/stat_standard.go new file mode 100644 index 000000000..5ad4083f5 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/internal/stat_standard.go @@ -0,0 +1,31 @@ +//go:build linux || dragonfly || solaris +// +build linux dragonfly solaris + +package internal + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// InfoToStat takes a platform native FileInfo and converts it into a 9P2000.L compatible Stat_t +func InfoToStat(fi os.FileInfo) *Stat_t { + nativeStat := fi.Sys().(*syscall.Stat_t) + return &Stat_t{ + Dev: nativeStat.Dev, + Ino: nativeStat.Ino, + Nlink: nativeStat.Nlink, + Mode: nativeStat.Mode, + Uid: nativeStat.Uid, + Gid: nativeStat.Gid, + Rdev: nativeStat.Rdev, + Size: nativeStat.Size, + Blksize: nativeStat.Blksize, + Blocks: nativeStat.Blocks, + Atim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Atim)), + Mtim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Mtim)), + Ctim: unix.NsecToTimespec(syscall.TimespecToNsec(nativeStat.Ctim)), + } +} diff --git a/vendor/github.com/hugelgupf/p9/internal/stat_unix.go b/vendor/github.com/hugelgupf/p9/internal/stat_unix.go new file mode 100644 index 000000000..0c02369ef --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/internal/stat_unix.go @@ -0,0 +1,11 @@ +//go:build !windows +// +build !windows + +package internal + +import ( + "golang.org/x/sys/unix" +) + +// Stat_t is the Linux Stat_t. 
+type Stat_t = unix.Stat_t diff --git a/vendor/github.com/hugelgupf/p9/internal/stat_windows.go b/vendor/github.com/hugelgupf/p9/internal/stat_windows.go new file mode 100644 index 000000000..2f7f0a088 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/internal/stat_windows.go @@ -0,0 +1,74 @@ +package internal + +import ( + "os" +) + +// NOTE: taken from amd64 Linux +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec +} + +// InfoToStat takes a platform native FileInfo and converts it into a 9P2000.L compatible Stat_t +func InfoToStat(fi os.FileInfo) *Stat_t { + return &Stat_t{ + Size: fi.Size(), + Mode: uint32(modeFromOS(fi.Mode())), + Mtim: Timespec{ + Sec: fi.ModTime().Unix(), + Nsec: fi.ModTime().UnixNano(), + }, + } + +} + +// TODO: copied from pkg p9 +// we should probably migrate the OS methods from p9 into sys +const ( + FileModeMask uint32 = 0170000 + ModeSocket = 0140000 + ModeSymlink = 0120000 + ModeRegular = 0100000 + ModeBlockDevice = 060000 + ModeDirectory = 040000 + ModeCharacterDevice = 020000 + ModeNamedPipe = 010000 +) + +func modeFromOS(mode os.FileMode) uint32 { + m := uint32(mode.Perm()) + switch { + case mode.IsDir(): + m |= ModeDirectory + case mode&os.ModeSymlink != 0: + m |= ModeSymlink + case mode&os.ModeSocket != 0: + m |= ModeSocket + case mode&os.ModeNamedPipe != 0: + m |= ModeNamedPipe + case mode&os.ModeCharDevice != 0: + m |= ModeCharacterDevice + case mode&os.ModeDevice != 0: + m |= ModeBlockDevice + default: + m |= ModeRegular + } + return m +} diff --git a/vendor/github.com/hugelgupf/p9/linux/errno.go b/vendor/github.com/hugelgupf/p9/linux/errno.go new file mode 100644 index 000000000..137cb59fd --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/linux/errno.go @@ -0,0 +1,291 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package linux + +import "fmt" + +// Errno is a Linux error number on every GOOS. +type Errno uintptr + +func (e Errno) Error() string { + if 0 <= int(e) && int(e) < len(errorTable) { + s := errorTable[e] + if s != "" { + return s + } + } + return fmt.Sprintf("errno %d", int(e)) +} + +// numbers defined on Linux/amd64. +const ( + E2BIG = Errno(0x7) + EACCES = Errno(0xd) + EADDRINUSE = Errno(0x62) + EADDRNOTAVAIL = Errno(0x63) + EADV = Errno(0x44) + EAFNOSUPPORT = Errno(0x61) + EAGAIN = Errno(0xb) + EALREADY = Errno(0x72) + EBADE = Errno(0x34) + EBADF = Errno(0x9) + EBADFD = Errno(0x4d) + EBADMSG = Errno(0x4a) + EBADR = Errno(0x35) + EBADRQC = Errno(0x38) + EBADSLT = Errno(0x39) + EBFONT = Errno(0x3b) + EBUSY = Errno(0x10) + ECANCELED = Errno(0x7d) + ECHILD = Errno(0xa) + ECHRNG = Errno(0x2c) + ECOMM = Errno(0x46) + ECONNABORTED = Errno(0x67) + ECONNREFUSED = Errno(0x6f) + ECONNRESET = Errno(0x68) + EDEADLK = Errno(0x23) + EDEADLOCK = Errno(0x23) + EDESTADDRREQ = Errno(0x59) + EDOM = Errno(0x21) + EDOTDOT = Errno(0x49) + EDQUOT = Errno(0x7a) + EEXIST = Errno(0x11) + EFAULT = Errno(0xe) + EFBIG = Errno(0x1b) + EHOSTDOWN = Errno(0x70) + EHOSTUNREACH = Errno(0x71) + EHWPOISON = Errno(0x85) + EIDRM = Errno(0x2b) + EILSEQ = Errno(0x54) + EINPROGRESS = Errno(0x73) + EINTR = Errno(0x4) + EINVAL = Errno(0x16) + EIO = Errno(0x5) + EISCONN = Errno(0x6a) + EISDIR = Errno(0x15) + EISNAM = Errno(0x78) + EKEYEXPIRED = Errno(0x7f) + EKEYREJECTED = Errno(0x81) + EKEYREVOKED = Errno(0x80) + EL2HLT = Errno(0x33) + EL2NSYNC = Errno(0x2d) + EL3HLT = Errno(0x2e) + EL3RST = Errno(0x2f) + ELIBACC = Errno(0x4f) + ELIBBAD = Errno(0x50) + ELIBEXEC = Errno(0x53) + ELIBMAX = Errno(0x52) + ELIBSCN = Errno(0x51) + ELNRNG = Errno(0x30) + ELOOP = Errno(0x28) + EMEDIUMTYPE = Errno(0x7c) + EMFILE = Errno(0x18) + EMLINK = Errno(0x1f) + EMSGSIZE = Errno(0x5a) + EMULTIHOP = 
Errno(0x48) + ENAMETOOLONG = Errno(0x24) + ENAVAIL = Errno(0x77) + ENETDOWN = Errno(0x64) + ENETRESET = Errno(0x66) + ENETUNREACH = Errno(0x65) + ENFILE = Errno(0x17) + ENOANO = Errno(0x37) + ENOBUFS = Errno(0x69) + ENOCSI = Errno(0x32) + ENODATA = Errno(0x3d) + ENODEV = Errno(0x13) + ENOENT = Errno(0x2) + ENOEXEC = Errno(0x8) + ENOKEY = Errno(0x7e) + ENOLCK = Errno(0x25) + ENOLINK = Errno(0x43) + ENOMEDIUM = Errno(0x7b) + ENOMEM = Errno(0xc) + ENOMSG = Errno(0x2a) + ENONET = Errno(0x40) + ENOPKG = Errno(0x41) + ENOPROTOOPT = Errno(0x5c) + ENOSPC = Errno(0x1c) + ENOSR = Errno(0x3f) + ENOSTR = Errno(0x3c) + ENOSYS = Errno(0x26) + ENOTBLK = Errno(0xf) + ENOTCONN = Errno(0x6b) + ENOTDIR = Errno(0x14) + ENOTEMPTY = Errno(0x27) + ENOTNAM = Errno(0x76) + ENOTRECOVERABLE = Errno(0x83) + ENOTSOCK = Errno(0x58) + ENOTSUP = Errno(0x5f) + ENOTTY = Errno(0x19) + ENOTUNIQ = Errno(0x4c) + ENXIO = Errno(0x6) + EOPNOTSUPP = Errno(0x5f) + EOVERFLOW = Errno(0x4b) + EOWNERDEAD = Errno(0x82) + EPERM = Errno(0x1) + EPFNOSUPPORT = Errno(0x60) + EPIPE = Errno(0x20) + EPROTO = Errno(0x47) + EPROTONOSUPPORT = Errno(0x5d) + EPROTOTYPE = Errno(0x5b) + ERANGE = Errno(0x22) + EREMCHG = Errno(0x4e) + EREMOTE = Errno(0x42) + EREMOTEIO = Errno(0x79) + ERESTART = Errno(0x55) + ERFKILL = Errno(0x84) + EROFS = Errno(0x1e) + ESHUTDOWN = Errno(0x6c) + ESOCKTNOSUPPORT = Errno(0x5e) + ESPIPE = Errno(0x1d) + ESRCH = Errno(0x3) + ESRMNT = Errno(0x45) + ESTALE = Errno(0x74) + ESTRPIPE = Errno(0x56) + ETIME = Errno(0x3e) + ETIMEDOUT = Errno(0x6e) + ETOOMANYREFS = Errno(0x6d) + ETXTBSY = Errno(0x1a) + EUCLEAN = Errno(0x75) + EUNATCH = Errno(0x31) + EUSERS = Errno(0x57) + EWOULDBLOCK = Errno(0xb) + EXDEV = Errno(0x12) + EXFULL = Errno(0x36) +) + +var errorTable = [...]string{ + 1: "operation not permitted", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "input/output error", + 6: "no such device or address", + 7: "argument list too long", + 8: "exec format 
error", + 9: "bad file descriptor", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "cannot allocate memory", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device or resource busy", + 17: "file exists", + 18: "invalid cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "too many open files in system", + 24: "too many open files", + 25: "inappropriate ioctl for device", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file system", + 31: "too many links", + 32: "broken pipe", + 33: "numerical argument out of domain", + 34: "numerical result out of range", + 35: "resource deadlock avoided", + 36: "file name too long", + 37: "no locks available", + 38: "function not implemented", + 39: "directory not empty", + 40: "too many levels of symbolic links", + 42: "no message of desired type", + 43: "identifier removed", + 44: "channel number out of range", + 45: "level 2 not synchronized", + 46: "level 3 halted", + 47: "level 3 reset", + 48: "link number out of range", + 49: "protocol driver not attached", + 50: "no CSI structure available", + 51: "level 2 halted", + 52: "invalid exchange", + 53: "invalid request descriptor", + 54: "exchange full", + 55: "no anode", + 56: "invalid request code", + 57: "invalid slot", + 59: "bad font file format", + 60: "device not a stream", + 61: "no data available", + 62: "timer expired", + 63: "out of streams resources", + 64: "machine is not on the network", + 65: "package not installed", + 66: "object is remote", + 67: "link has been severed", + 68: "advertise error", + 69: "srmount error", + 70: "communication error on send", + 71: "protocol error", + 72: "multihop attempted", + 73: "RFS specific error", + 74: "bad message", + 75: "value too large for defined data type", + 76: "name not unique on network", + 77: "file 
descriptor in bad state", + 78: "remote address changed", + 79: "can not access a needed shared library", + 80: "accessing a corrupted shared library", + 81: ".lib section in a.out corrupted", + 82: "attempting to link in too many shared libraries", + 83: "cannot exec a shared library directly", + 84: "invalid or incomplete multibyte or wide character", + 85: "interrupted system call should be restarted", + 86: "streams pipe error", + 87: "too many users", + 88: "socket operation on non-socket", + 89: "destination address required", + 90: "message too long", + 91: "protocol wrong type for socket", + 92: "protocol not available", + 93: "protocol not supported", + 94: "socket type not supported", + 95: "operation not supported", + 96: "protocol family not supported", + 97: "address family not supported by protocol", + 98: "address already in use", + 99: "cannot assign requested address", + 100: "network is down", + 101: "network is unreachable", + 102: "network dropped connection on reset", + 103: "software caused connection abort", + 104: "connection reset by peer", + 105: "no buffer space available", + 106: "transport endpoint is already connected", + 107: "transport endpoint is not connected", + 108: "cannot send after transport endpoint shutdown", + 109: "too many references: cannot splice", + 110: "connection timed out", + 111: "connection refused", + 112: "host is down", + 113: "no route to host", + 114: "operation already in progress", + 115: "operation now in progress", + 116: "stale NFS file handle", + 117: "structure needs cleaning", + 118: "not a XENIX named type file", + 119: "no XENIX semaphores available", + 120: "is a named type file", + 121: "remote I/O error", + 122: "disk quota exceeded", + 123: "no medium found", + 124: "wrong medium type", + 125: "operation canceled", + 126: "required key not available", + 127: "key has expired", + 128: "key has been revoked", + 129: "key was rejected by service", + 130: "owner died", + 131: "state not 
recoverable", + 132: "operation not possible due to RF-kill", +} diff --git a/vendor/github.com/hugelgupf/p9/linux/errors.go b/vendor/github.com/hugelgupf/p9/linux/errors.go new file mode 100644 index 000000000..cf4be5ae3 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/linux/errors.go @@ -0,0 +1,38 @@ +package linux + +import ( + "errors" + "os" +) + +// ExtractErrno extracts an [Errno] from an error, best effort. +// +// If the system-specific or Go-specific error cannot be mapped to anything, it +// will be logged and EIO will be returned. +func ExtractErrno(err error) Errno { + for _, pair := range []struct { + error + Errno + }{ + {os.ErrNotExist, ENOENT}, + {os.ErrExist, EEXIST}, + {os.ErrPermission, EACCES}, + {os.ErrInvalid, EINVAL}, + } { + if errors.Is(err, pair.error) { + return pair.Errno + } + } + + var errno Errno + if errors.As(err, &errno) { + return errno + } + + if e := sysErrno(err); e != 0 { + return e + } + + // Default case. + return EIO +} diff --git a/vendor/github.com/hugelgupf/p9/linux/errors_linux.go b/vendor/github.com/hugelgupf/p9/linux/errors_linux.go new file mode 100644 index 000000000..b99a43747 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/linux/errors_linux.go @@ -0,0 +1,17 @@ +//go:build linux +// +build linux + +package linux + +import ( + "errors" + "syscall" +) + +func sysErrno(err error) Errno { + var systemErr syscall.Errno + if errors.As(err, &systemErr) { + return Errno(systemErr) + } + return 0 +} diff --git a/vendor/github.com/hugelgupf/p9/linux/errors_unix.go b/vendor/github.com/hugelgupf/p9/linux/errors_unix.go new file mode 100644 index 000000000..11a65c451 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/linux/errors_unix.go @@ -0,0 +1,21 @@ +//go:build !windows && !linux +// +build !windows,!linux + +package linux + +import "syscall" + +func sysErrno(err error) Errno { + se, ok := err.(syscall.Errno) + if ok { + // POSIX-defined errors seem to match up to error number 34 + // according to 
http://www.ioplex.com/~miallen/errcmpp.html. + // + // 9P2000.L expects Linux error codes, so after 34 we normalize. + if se <= 34 { + return Errno(se) + } + return 0 + } + return 0 +} diff --git a/vendor/github.com/hugelgupf/p9/linux/errors_windows.go b/vendor/github.com/hugelgupf/p9/linux/errors_windows.go new file mode 100644 index 000000000..fa3e13559 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/linux/errors_windows.go @@ -0,0 +1,28 @@ +//go:build windows +// +build windows + +package linux + +import ( + "errors" + "syscall" +) + +func sysErrno(err error) Errno { + for _, pair := range []struct { + error + Errno + }{ + {syscall.ERROR_FILE_NOT_FOUND, ENOENT}, + {syscall.ERROR_PATH_NOT_FOUND, ENOENT}, + {syscall.ERROR_ACCESS_DENIED, EACCES}, + {syscall.ERROR_FILE_EXISTS, EEXIST}, + {syscall.ERROR_INSUFFICIENT_BUFFER, ENOMEM}, + } { + if errors.Is(err, pair.error) { + return pair.Errno + } + } + // No clue what to do with others. + return 0 +} diff --git a/vendor/github.com/hugelgupf/p9/p9/buffer.go b/vendor/github.com/hugelgupf/p9/p9/buffer.go new file mode 100644 index 000000000..ab71cda75 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/buffer.go @@ -0,0 +1,253 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "encoding/binary" +) + +// encoder is used for messages and 9P primitives. +type encoder interface { + // decode decodes from the given buffer. 
decode may be called more than once + // to reuse the instance. It must clear any previous state. + // + // This may not fail, exhaustion will be recorded in the buffer. + decode(b *buffer) + + // encode encodes to the given buffer. + // + // This may not fail. + encode(b *buffer) +} + +// order is the byte order used for encoding. +var order = binary.LittleEndian + +// buffer is a slice that is consumed. +// +// This is passed to the encoder methods. +type buffer struct { + // data is the underlying data. This may grow during encode. + data []byte + + // overflow indicates whether an overflow has occurred. + overflow bool +} + +// append appends n bytes to the buffer and returns a slice pointing to the +// newly appended bytes. +func (b *buffer) append(n int) []byte { + b.data = append(b.data, make([]byte, n)...) + return b.data[len(b.data)-n:] +} + +// consume consumes n bytes from the buffer. +func (b *buffer) consume(n int) ([]byte, bool) { + if !b.has(n) { + b.markOverrun() + return nil, false + } + rval := b.data[:n] + b.data = b.data[n:] + return rval, true +} + +// has returns true if n bytes are available. +func (b *buffer) has(n int) bool { + return len(b.data) >= n +} + +// markOverrun immediately marks this buffer as overrun. +// +// This is used by ReadString, since some invalid data implies the rest of the +// buffer is no longer valid either. +func (b *buffer) markOverrun() { + b.overflow = true +} + +// isOverrun returns true if this buffer has run past the end. +func (b *buffer) isOverrun() bool { + return b.overflow +} + +// Read8 reads a byte from the buffer. +func (b *buffer) Read8() uint8 { + v, ok := b.consume(1) + if !ok { + return 0 + } + return uint8(v[0]) +} + +// Read16 reads a 16-bit value from the buffer. +func (b *buffer) Read16() uint16 { + v, ok := b.consume(2) + if !ok { + return 0 + } + return order.Uint16(v) +} + +// Read32 reads a 32-bit value from the buffer. 
+func (b *buffer) Read32() uint32 { + v, ok := b.consume(4) + if !ok { + return 0 + } + return order.Uint32(v) +} + +// Read64 reads a 64-bit value from the buffer. +func (b *buffer) Read64() uint64 { + v, ok := b.consume(8) + if !ok { + return 0 + } + return order.Uint64(v) +} + +// ReadQIDType reads a QIDType value. +func (b *buffer) ReadQIDType() QIDType { + return QIDType(b.Read8()) +} + +// ReadTag reads a Tag value. +func (b *buffer) ReadTag() tag { + return tag(b.Read16()) +} + +// ReadFID reads a FID value. +func (b *buffer) ReadFID() fid { + return fid(b.Read32()) +} + +// ReadUID reads a UID value. +func (b *buffer) ReadUID() UID { + return UID(b.Read32()) +} + +// ReadGID reads a GID value. +func (b *buffer) ReadGID() GID { + return GID(b.Read32()) +} + +// ReadPermissions reads a file mode value and applies the mask for permissions. +func (b *buffer) ReadPermissions() FileMode { + return b.ReadFileMode() & permissionsMask +} + +// ReadFileMode reads a file mode value. +func (b *buffer) ReadFileMode() FileMode { + return FileMode(b.Read32()) +} + +// ReadOpenFlags reads an OpenFlags. +func (b *buffer) ReadOpenFlags() OpenFlags { + return OpenFlags(b.Read32()) +} + +// ReadMsgType writes a msgType. +func (b *buffer) ReadMsgType() msgType { + return msgType(b.Read8()) +} + +// ReadString deserializes a string. +func (b *buffer) ReadString() string { + l := b.Read16() + if !b.has(int(l)) { + // Mark the buffer as corrupted. + b.markOverrun() + return "" + } + + bs := make([]byte, l) + for i := 0; i < int(l); i++ { + bs[i] = byte(b.Read8()) + } + return string(bs) +} + +// Write8 writes a byte to the buffer. +func (b *buffer) Write8(v uint8) { + b.append(1)[0] = byte(v) +} + +// Write16 writes a 16-bit value to the buffer. +func (b *buffer) Write16(v uint16) { + order.PutUint16(b.append(2), v) +} + +// Write32 writes a 32-bit value to the buffer. 
+func (b *buffer) Write32(v uint32) { + order.PutUint32(b.append(4), v) +} + +// Write64 writes a 64-bit value to the buffer. +func (b *buffer) Write64(v uint64) { + order.PutUint64(b.append(8), v) +} + +// WriteQIDType writes a QIDType value. +func (b *buffer) WriteQIDType(qidType QIDType) { + b.Write8(uint8(qidType)) +} + +// WriteTag writes a Tag value. +func (b *buffer) WriteTag(tag tag) { + b.Write16(uint16(tag)) +} + +// WriteFID writes a FID value. +func (b *buffer) WriteFID(fid fid) { + b.Write32(uint32(fid)) +} + +// WriteUID writes a UID value. +func (b *buffer) WriteUID(uid UID) { + b.Write32(uint32(uid)) +} + +// WriteGID writes a GID value. +func (b *buffer) WriteGID(gid GID) { + b.Write32(uint32(gid)) +} + +// WritePermissions applies a permissions mask and writes the FileMode. +func (b *buffer) WritePermissions(perm FileMode) { + b.WriteFileMode(perm & permissionsMask) +} + +// WriteFileMode writes a FileMode. +func (b *buffer) WriteFileMode(mode FileMode) { + b.Write32(uint32(mode)) +} + +// WriteOpenFlags writes an OpenFlags. +func (b *buffer) WriteOpenFlags(flags OpenFlags) { + b.Write32(uint32(flags)) +} + +// WriteMsgType writes a MsgType. +func (b *buffer) WriteMsgType(t msgType) { + b.Write8(uint8(t)) +} + +// WriteString serializes the given string. +func (b *buffer) WriteString(s string) { + b.Write16(uint16(len(s))) + for i := 0; i < len(s); i++ { + b.Write8(byte(s[i])) + } +} diff --git a/vendor/github.com/hugelgupf/p9/p9/client.go b/vendor/github.com/hugelgupf/p9/p9/client.go new file mode 100644 index 000000000..76ce6de04 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/client.go @@ -0,0 +1,344 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/hugelgupf/p9/linux" + "github.com/u-root/uio/ulog" +) + +// ErrOutOfTags indicates no tags are available. +var ErrOutOfTags = errors.New("out of tags -- messages lost?") + +// ErrOutOfFIDs indicates no more FIDs are available. +var ErrOutOfFIDs = errors.New("out of FIDs -- messages lost?") + +// ErrUnexpectedTag indicates a response with an unexpected tag was received. +var ErrUnexpectedTag = errors.New("unexpected tag in response") + +// ErrVersionsExhausted indicates that all versions to negotiate have been exhausted. +var ErrVersionsExhausted = errors.New("exhausted all versions to negotiate") + +// ErrBadVersionString indicates that the version string is malformed or unsupported. +var ErrBadVersionString = errors.New("bad version string") + +// ErrBadResponse indicates the response didn't match the request. +type ErrBadResponse struct { + Got msgType + Want msgType +} + +// Error returns a highly descriptive error. +func (e *ErrBadResponse) Error() string { + return fmt.Sprintf("unexpected message type: got %v, want %v", e.Got, e.Want) +} + +// response is the asynchronous return from recv. +// +// This is used in the pending map below. +type response struct { + r message + done chan error +} + +var responsePool = sync.Pool{ + New: func() interface{} { + return &response{ + done: make(chan error, 1), + } + }, +} + +// Client is at least a 9P2000.L client. +type Client struct { + // conn is the connected conn. 
+ conn io.ReadWriteCloser + + // tagPool is the collection of available tags. + tagPool pool + + // fidPool is the collection of available fids. + fidPool pool + + // pending is the set of pending messages. + pending map[tag]*response + pendingMu sync.Mutex + + // sendMu is the lock for sending a request. + sendMu sync.Mutex + + // recvr is essentially a mutex for calling recv. + // + // Whoever writes to this channel is permitted to call recv. When + // finished calling recv, this channel should be emptied. + recvr chan bool + + // messageSize is the maximum total size of a message. + messageSize uint32 + + // payloadSize is the maximum payload size of a read or write + // request. For large reads and writes this means that the + // read or write is broken up into buffer-size/payloadSize + // requests. + payloadSize uint32 + + // version is the agreed upon version X of 9P2000.L.Google.X. + // version 0 implies 9P2000.L. + version uint32 + + // log is the logger to write to, if specified. + log ulog.Logger +} + +// ClientOpt enables optional client configuration. +type ClientOpt func(*Client) error + +// WithMessageSize overrides the default message size. +func WithMessageSize(m uint32) ClientOpt { + return func(c *Client) error { + // Need at least one byte of payload. + if m <= msgDotLRegistry.largestFixedSize { + return &ErrMessageTooLarge{ + size: m, + msize: msgDotLRegistry.largestFixedSize, + } + } + c.messageSize = m + return nil + } +} + +// WithClientLogger overrides the default logger for the client. +func WithClientLogger(l ulog.Logger) ClientOpt { + return func(c *Client) error { + c.log = l + return nil + } +} + +func roundDown(p uint32, align uint32) uint32 { + if p > align && p%align != 0 { + return p - p%align + } + return p +} + +// NewClient creates a new client. It performs a Tversion exchange with +// the server to assert that messageSize is ok to use. +// +// You should not use the same conn for multiple clients. 
+func NewClient(conn io.ReadWriteCloser, o ...ClientOpt) (*Client, error) { + c := &Client{ + conn: conn, + tagPool: pool{start: 1, limit: uint64(noTag)}, + fidPool: pool{start: 1, limit: uint64(noFID)}, + pending: make(map[tag]*response), + recvr: make(chan bool, 1), + messageSize: DefaultMessageSize, + log: ulog.Null, + + // Request a high version by default. + version: highestSupportedVersion, + } + + for _, opt := range o { + if err := opt(c); err != nil { + return nil, err + } + } + + // Compute a payload size and round to 512 (normal block size) + // if it's larger than a single block. + c.payloadSize = roundDown(c.messageSize-msgDotLRegistry.largestFixedSize, 512) + + // Agree upon a version. + requested := c.version + for { + rversion := rversion{} + err := c.sendRecv(&tversion{Version: versionString(version9P2000L, requested), MSize: c.messageSize}, &rversion) + + // The server told us to try again with a lower version. + if errors.Is(err, linux.EAGAIN) { + if requested == lowestSupportedVersion { + return nil, ErrVersionsExhausted + } + requested-- + continue + } + + // We requested an impossible version or our other parameters were bogus. + if err != nil { + return nil, err + } + + // Parse the version. + baseVersion, version, ok := parseVersion(rversion.Version) + if !ok { + // The server gave us a bad version. We return a generically worrisome error. + c.log.Printf("server returned bad version string %q", rversion.Version) + return nil, ErrBadVersionString + } + if baseVersion != version9P2000L { + c.log.Printf("server returned unsupported base version %q (version %q)", baseVersion, rversion.Version) + return nil, ErrBadVersionString + } + c.version = version + break + } + return c, nil +} + +// handleOne handles a single incoming message. +// +// This should only be called with the token from recvr. Note that the received +// tag will automatically be cleared from pending. 
+func (c *Client) handleOne() { + t, r, err := recv(c.log, c.conn, c.messageSize, func(t tag, mt msgType) (message, error) { + c.pendingMu.Lock() + resp := c.pending[t] + c.pendingMu.Unlock() + + // Not expecting this message? + if resp == nil { + c.log.Printf("client received unexpected tag %v, ignoring", t) + return nil, ErrUnexpectedTag + } + + // Is it an error? We specifically allow this to + // go through, and then we deserialize below. + if mt == msgRlerror { + return &rlerror{}, nil + } + + // Does it match expectations? + if mt != resp.r.typ() { + return nil, &ErrBadResponse{Got: mt, Want: resp.r.typ()} + } + + // Return the response. + return resp.r, nil + }) + + if err != nil { + // No tag was extracted (probably a conn error). + // + // Likely catastrophic. Notify all waiters and clear pending. + c.pendingMu.Lock() + for _, resp := range c.pending { + resp.done <- err + } + c.pending = make(map[tag]*response) + c.pendingMu.Unlock() + } else { + // Process the tag. + // + // We know that is is contained in the map because our lookup function + // above must have succeeded (found the tag) to return nil err. + c.pendingMu.Lock() + resp := c.pending[t] + delete(c.pending, t) + c.pendingMu.Unlock() + resp.r = r + resp.done <- err + } +} + +// waitAndRecv co-ordinates with other receivers to handle responses. +func (c *Client) waitAndRecv(done chan error) error { + for { + select { + case err := <-done: + return err + case c.recvr <- true: + select { + case err := <-done: + // It's possible that we got the token, despite + // done also being available. Check for that. + <-c.recvr + return err + default: + // Handle receiving one tag. + c.handleOne() + + // Return the token. + <-c.recvr + } + } + } +} + +// sendRecv performs a roundtrip message exchange. +// +// This is called by internal functions. 
+func (c *Client) sendRecv(tm message, rm message) error { + t, ok := c.tagPool.Get() + if !ok { + return ErrOutOfTags + } + defer c.tagPool.Put(t) + + // Indicate we're expecting a response. + // + // Note that the tag will be cleared from pending + // automatically (see handleOne for details). + resp := responsePool.Get().(*response) + defer responsePool.Put(resp) + resp.r = rm + c.pendingMu.Lock() + c.pending[tag(t)] = resp + c.pendingMu.Unlock() + + // Send the request over the wire. + c.sendMu.Lock() + err := send(c.log, c.conn, tag(t), tm) + c.sendMu.Unlock() + if err != nil { + return fmt.Errorf("send: %w", err) + } + + // Co-ordinate with other receivers. + if err := c.waitAndRecv(resp.done); err != nil { + return fmt.Errorf("wait: %w", err) + } + + // Is it an error message? + // + // For convenience, we transform these directly + // into errors. Handlers need not handle this case. + if rlerr, ok := resp.r.(*rlerror); ok { + return linux.Errno(rlerr.Error) + } + + // At this point, we know it matches. + // + // Per recv call above, we will only allow a type + // match (and give our r) or an instance of Rlerror. + return nil +} + +// Version returns the negotiated 9P2000.L.Google version number. +func (c *Client) Version() uint32 { + return c.version +} + +// Close closes the underlying connection. +func (c *Client) Close() error { + return c.conn.Close() +} diff --git a/vendor/github.com/hugelgupf/p9/p9/client_file.go b/vendor/github.com/hugelgupf/p9/p9/client_file.go new file mode 100644 index 000000000..f69324e2e --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/client_file.go @@ -0,0 +1,568 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "fmt" + "io" + "runtime" + "sync/atomic" + + "github.com/hugelgupf/p9/linux" +) + +// Attach attaches to a server. +// +// Note that authentication is not currently supported. +func (c *Client) Attach(name string) (File, error) { + id, ok := c.fidPool.Get() + if !ok { + return nil, ErrOutOfFIDs + } + + rattach := rattach{} + if err := c.sendRecv(&tattach{fid: fid(id), Auth: tauth{AttachName: name, Authenticationfid: noFID, UID: NoUID}}, &rattach); err != nil { + c.fidPool.Put(id) + return nil, err + } + + return c.newFile(fid(id)), nil +} + +// newFile returns a new client file. +func (c *Client) newFile(fid fid) *clientFile { + cf := &clientFile{ + client: c, + fid: fid, + } + + // Make sure the file is closed. + runtime.SetFinalizer(cf, (*clientFile).Close) + + return cf +} + +// clientFile is provided to clients. +// +// This proxies all of the interfaces found in file.go. +type clientFile struct { + // client is the originating client. + client *Client + + // fid is the fid for this file. + fid fid + + // closed indicates whether this file has been closed. + closed uint32 +} + +// SetXattr implements p9.File.SetXattr. +func (c *clientFile) SetXattr(attr string, data []byte, flags XattrFlags) error { + return linux.ENOSYS +} + +// RemoveXattr implements p9.File.RemoveXattr. +func (c *clientFile) RemoveXattr(attr string) error { + return linux.ENOSYS +} + +// GetXattr implements p9.File.GetXattr. 
+func (c *clientFile) GetXattr(attr string) ([]byte, error) { + return nil, linux.ENOSYS +} + +// ListXattrs implements p9.File.ListXattrs. +func (c *clientFile) ListXattrs() ([]string, error) { + return nil, linux.ENOSYS +} + +// Walk implements File.Walk. +func (c *clientFile) Walk(names []string) ([]QID, File, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return nil, nil, linux.EBADF + } + + id, ok := c.client.fidPool.Get() + if !ok { + return nil, nil, ErrOutOfFIDs + } + + rwalk := rwalk{} + if err := c.client.sendRecv(&twalk{fid: c.fid, newFID: fid(id), Names: names}, &rwalk); err != nil { + c.client.fidPool.Put(id) + return nil, nil, err + } + + // Return a new client file. + return rwalk.QIDs, c.client.newFile(fid(id)), nil +} + +// WalkGetAttr implements File.WalkGetAttr. +func (c *clientFile) WalkGetAttr(components []string) ([]QID, File, AttrMask, Attr, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return nil, nil, AttrMask{}, Attr{}, linux.EBADF + } + + if !versionSupportsTwalkgetattr(c.client.version) { + qids, file, err := c.Walk(components) + if err != nil { + return nil, nil, AttrMask{}, Attr{}, err + } + _, valid, attr, err := file.GetAttr(AttrMaskAll) + if err != nil { + file.Close() + return nil, nil, AttrMask{}, Attr{}, err + } + return qids, file, valid, attr, nil + } + + id, ok := c.client.fidPool.Get() + if !ok { + return nil, nil, AttrMask{}, Attr{}, ErrOutOfFIDs + } + + rwalkgetattr := rwalkgetattr{} + if err := c.client.sendRecv(&twalkgetattr{fid: c.fid, newFID: fid(id), Names: components}, &rwalkgetattr); err != nil { + c.client.fidPool.Put(id) + return nil, nil, AttrMask{}, Attr{}, err + } + + // Return a new client file. + return rwalkgetattr.QIDs, c.client.newFile(fid(id)), rwalkgetattr.Valid, rwalkgetattr.Attr, nil +} + +// StatFS implements File.StatFS. 
+func (c *clientFile) StatFS() (FSStat, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return FSStat{}, linux.EBADF + } + + rstatfs := rstatfs{} + if err := c.client.sendRecv(&tstatfs{fid: c.fid}, &rstatfs); err != nil { + return FSStat{}, err + } + + return rstatfs.FSStat, nil +} + +// FSync implements File.FSync. +func (c *clientFile) FSync() error { + if atomic.LoadUint32(&c.closed) != 0 { + return linux.EBADF + } + + return c.client.sendRecv(&tfsync{fid: c.fid}, &rfsync{}) +} + +// GetAttr implements File.GetAttr. +func (c *clientFile) GetAttr(req AttrMask) (QID, AttrMask, Attr, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return QID{}, AttrMask{}, Attr{}, linux.EBADF + } + + rgetattr := rgetattr{} + if err := c.client.sendRecv(&tgetattr{fid: c.fid, AttrMask: req}, &rgetattr); err != nil { + return QID{}, AttrMask{}, Attr{}, err + } + + return rgetattr.QID, rgetattr.Valid, rgetattr.Attr, nil +} + +// SetAttr implements File.SetAttr. +func (c *clientFile) SetAttr(valid SetAttrMask, attr SetAttr) error { + if atomic.LoadUint32(&c.closed) != 0 { + return linux.EBADF + } + + return c.client.sendRecv(&tsetattr{fid: c.fid, Valid: valid, SetAttr: attr}, &rsetattr{}) +} + +// Lock implements File.Lock +func (c *clientFile) Lock(pid int, locktype LockType, flags LockFlags, start, length uint64, client string) (LockStatus, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return LockStatusError, linux.EBADF + } + + r := rlock{} + err := c.client.sendRecv(&tlock{ + Type: locktype, + Flags: flags, + Start: start, + Length: length, + PID: int32(pid), + Client: client, + }, &r) + return r.Status, err +} + +// Remove implements File.Remove. +// +// N.B. This method is no longer part of the file interface and should be +// considered deprecated. +func (c *clientFile) Remove() error { + // Avoid double close. + if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { + return linux.EBADF + } + runtime.SetFinalizer(c, nil) + + // Send the remove message. 
+ if err := c.client.sendRecv(&tremove{fid: c.fid}, &rremove{}); err != nil { + return err + } + + // "It is correct to consider remove to be a clunk with the side effect + // of removing the file if permissions allow." + // https://swtch.com/plan9port/man/man9/remove.html + + // Return the fid to the pool. + c.client.fidPool.Put(uint64(c.fid)) + return nil +} + +// Close implements File.Close. +func (c *clientFile) Close() error { + // Avoid double close. + if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { + return linux.EBADF + } + runtime.SetFinalizer(c, nil) + + // Send the close message. + if err := c.client.sendRecv(&tclunk{fid: c.fid}, &rclunk{}); err != nil { + // If an error occurred, we toss away the fid. This isn't ideal, + // but I'm not sure what else makes sense in this context. + return err + } + + // Return the fid to the pool. + c.client.fidPool.Put(uint64(c.fid)) + return nil +} + +// Open implements File.Open. +func (c *clientFile) Open(flags OpenFlags) (QID, uint32, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return QID{}, 0, linux.EBADF + } + + rlopen := rlopen{} + if err := c.client.sendRecv(&tlopen{fid: c.fid, Flags: flags}, &rlopen); err != nil { + return QID{}, 0, err + } + + return rlopen.QID, rlopen.IoUnit, nil +} + +// chunk applies fn to p in chunkSize-sized chunks until fn returns a partial result, p is +// exhausted, or an error is encountered (which may be io.EOF). +func chunk(chunkSize uint32, fn func([]byte, int64) (int, error), p []byte, offset int64) (int, error) { + // Some p9.Clients depend on executing fn on zero-byte buffers. Handle this + // as a special case (normally it is fine to short-circuit and return (0, nil)). + if len(p) == 0 { + return fn(p, offset) + } + + // total is the cumulative bytes processed. + var total int + for { + var n int + var err error + + // We're done, don't bother trying to do anything more. 
+ if total == len(p) { + return total, nil + } + + // Apply fn to a chunkSize-sized (or less) chunk of p. + if len(p) < total+int(chunkSize) { + n, err = fn(p[total:], offset) + } else { + n, err = fn(p[total:total+int(chunkSize)], offset) + } + total += n + offset += int64(n) + + // Return whatever we have processed if we encounter an error. This error + // could be io.EOF. + if err != nil { + return total, err + } + + // Did we get a partial result? If so, return it immediately. + if n < int(chunkSize) { + return total, nil + } + + // If we received more bytes than we ever requested, this is a problem. + if total > len(p) { + panic(fmt.Sprintf("bytes completed (%d)) > requested (%d)", total, len(p))) + } + } +} + +// ReadAt proxies File.ReadAt. +func (c *clientFile) ReadAt(p []byte, offset int64) (int, error) { + return chunk(c.client.payloadSize, c.readAt, p, offset) +} + +func (c *clientFile) readAt(p []byte, offset int64) (int, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return 0, linux.EBADF + } + + rread := rread{Data: p} + if err := c.client.sendRecv(&tread{fid: c.fid, Offset: uint64(offset), Count: uint32(len(p))}, &rread); err != nil { + return 0, err + } + + // The message may have been truncated, or for some reason a new buffer + // allocated. This isn't the common path, but we make sure that if the + // payload has changed we copy it. See transport.go for more information. + if len(p) > 0 && len(rread.Data) > 0 && &rread.Data[0] != &p[0] { + copy(p, rread.Data) + } + + // io.EOF is not an error that a p9 server can return. Use POSIX semantics to + // return io.EOF manually: zero bytes were returned and a non-zero buffer was used. + if len(rread.Data) == 0 && len(p) > 0 { + return 0, io.EOF + } + + return len(rread.Data), nil +} + +// WriteAt proxies File.WriteAt. 
+func (c *clientFile) WriteAt(p []byte, offset int64) (int, error) { + return chunk(c.client.payloadSize, c.writeAt, p, offset) +} + +func (c *clientFile) writeAt(p []byte, offset int64) (int, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return 0, linux.EBADF + } + + rwrite := rwrite{} + if err := c.client.sendRecv(&twrite{fid: c.fid, Offset: uint64(offset), Data: p}, &rwrite); err != nil { + return 0, err + } + + return int(rwrite.Count), nil +} + +// Rename implements File.Rename. +func (c *clientFile) Rename(dir File, name string) error { + if atomic.LoadUint32(&c.closed) != 0 { + return linux.EBADF + } + + clientDir, ok := dir.(*clientFile) + if !ok { + return linux.EBADF + } + + return c.client.sendRecv(&trename{fid: c.fid, Directory: clientDir.fid, Name: name}, &rrename{}) +} + +// Create implements File.Create. +func (c *clientFile) Create(name string, openFlags OpenFlags, permissions FileMode, uid UID, gid GID) (File, QID, uint32, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return nil, QID{}, 0, linux.EBADF + } + + msg := tlcreate{ + fid: c.fid, + Name: name, + OpenFlags: openFlags, + Permissions: permissions, + GID: NoGID, + } + + if versionSupportsTucreation(c.client.version) { + msg.GID = gid + rucreate := rucreate{} + if err := c.client.sendRecv(&tucreate{tlcreate: msg, UID: uid}, &rucreate); err != nil { + return nil, QID{}, 0, err + } + return c, rucreate.QID, rucreate.IoUnit, nil + } + + rlcreate := rlcreate{} + if err := c.client.sendRecv(&msg, &rlcreate); err != nil { + return nil, QID{}, 0, err + } + + return c, rlcreate.QID, rlcreate.IoUnit, nil +} + +// Mkdir implements File.Mkdir. 
+func (c *clientFile) Mkdir(name string, permissions FileMode, uid UID, gid GID) (QID, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return QID{}, linux.EBADF + } + + msg := tmkdir{ + Directory: c.fid, + Name: name, + Permissions: permissions, + GID: NoGID, + } + + if versionSupportsTucreation(c.client.version) { + msg.GID = gid + rumkdir := rumkdir{} + if err := c.client.sendRecv(&tumkdir{tmkdir: msg, UID: uid}, &rumkdir); err != nil { + return QID{}, err + } + return rumkdir.QID, nil + } + + rmkdir := rmkdir{} + if err := c.client.sendRecv(&msg, &rmkdir); err != nil { + return QID{}, err + } + + return rmkdir.QID, nil +} + +// Symlink implements File.Symlink. +func (c *clientFile) Symlink(oldname string, newname string, uid UID, gid GID) (QID, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return QID{}, linux.EBADF + } + + msg := tsymlink{ + Directory: c.fid, + Name: newname, + Target: oldname, + GID: NoGID, + } + + if versionSupportsTucreation(c.client.version) { + msg.GID = gid + rusymlink := rusymlink{} + if err := c.client.sendRecv(&tusymlink{tsymlink: msg, UID: uid}, &rusymlink); err != nil { + return QID{}, err + } + return rusymlink.QID, nil + } + + rsymlink := rsymlink{} + if err := c.client.sendRecv(&msg, &rsymlink); err != nil { + return QID{}, err + } + + return rsymlink.QID, nil +} + +// Link implements File.Link. +func (c *clientFile) Link(target File, newname string) error { + if atomic.LoadUint32(&c.closed) != 0 { + return linux.EBADF + } + + targetFile, ok := target.(*clientFile) + if !ok { + return linux.EBADF + } + + return c.client.sendRecv(&tlink{Directory: c.fid, Name: newname, Target: targetFile.fid}, &rlink{}) +} + +// Mknod implements File.Mknod. 
+func (c *clientFile) Mknod(name string, mode FileMode, major uint32, minor uint32, uid UID, gid GID) (QID, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return QID{}, linux.EBADF + } + + msg := tmknod{ + Directory: c.fid, + Name: name, + Mode: mode, + Major: major, + Minor: minor, + GID: NoGID, + } + + if versionSupportsTucreation(c.client.version) { + msg.GID = gid + rumknod := rumknod{} + if err := c.client.sendRecv(&tumknod{tmknod: msg, UID: uid}, &rumknod); err != nil { + return QID{}, err + } + return rumknod.QID, nil + } + + rmknod := rmknod{} + if err := c.client.sendRecv(&msg, &rmknod); err != nil { + return QID{}, err + } + + return rmknod.QID, nil +} + +// RenameAt implements File.RenameAt. +func (c *clientFile) RenameAt(oldname string, newdir File, newname string) error { + if atomic.LoadUint32(&c.closed) != 0 { + return linux.EBADF + } + + clientNewDir, ok := newdir.(*clientFile) + if !ok { + return linux.EBADF + } + + return c.client.sendRecv(&trenameat{OldDirectory: c.fid, OldName: oldname, NewDirectory: clientNewDir.fid, NewName: newname}, &rrenameat{}) +} + +// UnlinkAt implements File.UnlinkAt. +func (c *clientFile) UnlinkAt(name string, flags uint32) error { + if atomic.LoadUint32(&c.closed) != 0 { + return linux.EBADF + } + + return c.client.sendRecv(&tunlinkat{Directory: c.fid, Name: name, Flags: flags}, &runlinkat{}) +} + +// Readdir implements File.Readdir. +func (c *clientFile) Readdir(offset uint64, count uint32) (Dirents, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return nil, linux.EBADF + } + + rreaddir := rreaddir{} + if err := c.client.sendRecv(&treaddir{Directory: c.fid, Offset: offset, Count: count}, &rreaddir); err != nil { + return nil, err + } + + return rreaddir.Entries, nil +} + +// Readlink implements File.Readlink. 
+func (c *clientFile) Readlink() (string, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return "", linux.EBADF + } + + rreadlink := rreadlink{} + if err := c.client.sendRecv(&treadlink{fid: c.fid}, &rreadlink); err != nil { + return "", err + } + + return rreadlink.Target, nil +} + +// Renamed implements File.Renamed. +func (c *clientFile) Renamed(newDir File, newName string) {} diff --git a/vendor/github.com/hugelgupf/p9/p9/file.go b/vendor/github.com/hugelgupf/p9/p9/file.go new file mode 100644 index 000000000..b4ba28349 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/file.go @@ -0,0 +1,274 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "github.com/hugelgupf/p9/linux" +) + +// Attacher is provided by the server. +type Attacher interface { + // Attach returns a new File. + // + // The client-side attach will be translate to a series of walks from + // the file returned by this Attach call. + Attach() (File, error) +} + +// File is a set of operations corresponding to a single node. +// +// Note that on the server side, the server logic places constraints on +// concurrent operations to make things easier. This may reduce the need for +// complex, error-prone locking and logic in the backend. These are documented +// for each method. +// +// There are three different types of guarantees provided: +// +// none: There is no concurrency guarantee. 
The method may be invoked +// concurrently with any other method on any other file. +// +// read: The method is guaranteed to be exclusive of any write or global +// operation that is mutating the state of the directory tree starting at this +// node. For example, this means creating new files, symlinks, directories or +// renaming a directory entry (or renaming in to this target), but the method +// may be called concurrently with other read methods. +// +// write: The method is guaranteed to be exclusive of any read, write or global +// operation that is mutating the state of the directory tree starting at this +// node, as described in read above. There may however, be other write +// operations executing concurrently on other components in the directory tree. +// +// global: The method is guaranteed to be exclusive of any read, write or +// global operation. +type File interface { + // Walk walks to the path components given in names. + // + // Walk returns QIDs in the same order that the names were passed in. + // + // An empty list of arguments should return a copy of the current file. + // + // On the server, Walk has a read concurrency guarantee. + Walk(names []string) ([]QID, File, error) + + // WalkGetAttr walks to the next file and returns its maximal set of + // attributes. + // + // Server-side p9.Files may return linux.ENOSYS to indicate that Walk + // and GetAttr should be used separately to satisfy this request. + // + // On the server, WalkGetAttr has a read concurrency guarantee. + WalkGetAttr([]string) ([]QID, File, AttrMask, Attr, error) + + // StatFS returns information about the file system associated with + // this file. + // + // On the server, StatFS has no concurrency guarantee. + StatFS() (FSStat, error) + + // GetAttr returns attributes of this node. + // + // On the server, GetAttr has a read concurrency guarantee. + GetAttr(req AttrMask) (QID, AttrMask, Attr, error) + + // SetAttr sets attributes on this node. 
+ // + // On the server, SetAttr has a write concurrency guarantee. + SetAttr(valid SetAttrMask, attr SetAttr) error + + // Close is called when all references are dropped on the server side, + // and Close should be called by the client to drop all references. + // + // For server-side implementations of Close, the error is ignored. + // + // Close must be called even when Open has not been called. + // + // On the server, Close has no concurrency guarantee. + Close() error + + // Open must be called prior to using ReadAt, WriteAt, or Readdir. Once + // Open is called, some operations, such as Walk, will no longer work. + // + // On the client, Open should be called only once. The fd return is + // optional, and may be nil. + // + // On the server, Open has a read concurrency guarantee. Open is + // guaranteed to be called only once. + // + // N.B. The server must resolve any lazy paths when open is called. + // After this point, read and write may be called on files with no + // deletion check, so resolving in the data path is not viable. + Open(mode OpenFlags) (QID, uint32, error) + + // ReadAt reads from this file. Open must be called first. + // + // This may return io.EOF in addition to linux.Errno values. + // + // On the server, ReadAt has a read concurrency guarantee. See Open for + // additional requirements regarding lazy path resolution. + ReadAt(p []byte, offset int64) (int, error) + + // WriteAt writes to this file. Open must be called first. + // + // This may return io.EOF in addition to linux.Errno values. + // + // On the server, WriteAt has a read concurrency guarantee. See Open + // for additional requirements regarding lazy path resolution. + WriteAt(p []byte, offset int64) (int, error) + + // SetXattr sets the extended attributes attr=data of the file. + // + // Flags are implementation-specific, but are + // generally Linux setxattr(2) flags. 
+ SetXattr(attr string, data []byte, flags XattrFlags) error + + // GetXattr fetches the extended attribute attr of the file. + GetXattr(attr string) ([]byte, error) + + // ListXattrs lists the extended attribute names of the file. + ListXattrs() ([]string, error) + + // RemoveXattr removes the extended attribute attr from the file. + RemoveXattr(attr string) error + + // FSync syncs this node. Open must be called first. + // + // On the server, FSync has a read concurrency guarantee. + FSync() error + + // Lock locks the file. The operation as defined in 9P2000.L is fairly + // ambitious, being a near-direct mapping to lockf(2)/fcntl(2)-style + // locking, but most implementations use flock(2). + // + // Arguments are defined by the 9P2000.L standard. + // + // Pid is the PID on the client. Locktype is one of read, write, or + // unlock (resp. 0, 1, or 2). Flags are to block (0), meaning wait; or + // reclaim (1), which is currently "reserved for future use." Start and + // length are the start of the region to use and the size. In many + // implementations, they are ignored and flock(2) is used. Client is an + // arbitrary string, also frequently unused. The Linux v9fs client + // happens to set the client name to the node name. + // + // The Linux v9fs client implements fcntl(F_SETLK) by calling lock + // without any flags set. + // + // The Linux v9fs client implements the fcntl(F_SETLKW) (blocking) + // lock request by calling lock with P9_LOCK_FLAGS_BLOCK set. If the + // response is P9_LOCK_BLOCKED, it retries the lock request in an + // interruptible loop until status is no longer P9_LOCK_BLOCKED. + // + // The Linux v9fs client translates BSD advisory locks (flock) to + // whole-file POSIX record locks. v9fs does not implement mandatory + // locks and will return ENOLCK if use is attempted. + // + // In the return values, a LockStatus corresponds to an Rlock, while + // returning an error corresponds to an Rlerror message. 
If any non-nil + // error is returned, an Rlerror message will be sent. + // + // The most commonly used return values are success and error (resp. 0 + // and 2); blocked (1) and grace (3) are also possible. + Lock(pid int, locktype LockType, flags LockFlags, start, length uint64, client string) (LockStatus, error) + + // Create creates a new regular file and opens it according to the + // flags given. This file is already Open. + // + // N.B. On the client, the returned file is a reference to the current + // file, which now represents the created file. This is not the case on + // the server. These semantics are very subtle and can easily lead to + // bugs, but are a consequence of the 9P create operation. + // + // On the server, Create has a write concurrency guarantee. + Create(name string, flags OpenFlags, permissions FileMode, uid UID, gid GID) (File, QID, uint32, error) + + // Mkdir creates a subdirectory. + // + // On the server, Mkdir has a write concurrency guarantee. + Mkdir(name string, permissions FileMode, uid UID, gid GID) (QID, error) + + // Symlink makes a new symbolic link. + // + // On the server, Symlink has a write concurrency guarantee. + Symlink(oldName string, newName string, uid UID, gid GID) (QID, error) + + // Link makes a new hard link. + // + // On the server, Link has a write concurrency guarantee. + Link(target File, newName string) error + + // Mknod makes a new device node. + // + // On the server, Mknod has a write concurrency guarantee. + Mknod(name string, mode FileMode, major uint32, minor uint32, uid UID, gid GID) (QID, error) + + // Rename renames the file. + // + // Rename will never be called on the server, and RenameAt will always + // be used instead. + Rename(newDir File, newName string) error + + // RenameAt renames a given file to a new name in a potentially new + // directory. + // + // oldName must be a name relative to this file, which must be a + // directory. newName is a name relative to newDir. 
+ // + // On the server, RenameAt has a global concurrency guarantee. + RenameAt(oldName string, newDir File, newName string) error + + // UnlinkAt the given named file. + // + // name must be a file relative to this directory. + // + // Flags are implementation-specific (e.g. O_DIRECTORY), but are + // generally Linux unlinkat(2) flags. + // + // On the server, UnlinkAt has a write concurrency guarantee. + UnlinkAt(name string, flags uint32) error + + // Readdir reads directory entries. + // + // offset is the entry offset, and count the number of entries to + // return. + // + // This may return io.EOF in addition to linux.Errno values. + // + // On the server, Readdir has a read concurrency guarantee. + Readdir(offset uint64, count uint32) (Dirents, error) + + // Readlink reads the link target. + // + // On the server, Readlink has a read concurrency guarantee. + Readlink() (string, error) + + // Renamed is called when this node is renamed. + // + // This may not fail. The file will hold a reference to its parent + // within the p9 package, and is therefore safe to use for the lifetime + // of this File (until Close is called). + // + // This method should not be called by clients, who should use the + // relevant Rename methods. (Although the method will be a no-op.) + // + // On the server, Renamed has a global concurrency guarantee. + Renamed(newDir File, newName string) +} + +// DefaultWalkGetAttr implements File.WalkGetAttr to return ENOSYS for server-side Files. +type DefaultWalkGetAttr struct{} + +// WalkGetAttr implements File.WalkGetAttr. 
+func (DefaultWalkGetAttr) WalkGetAttr([]string) ([]QID, File, AttrMask, Attr, error) { + return nil, nil, AttrMask{}, Attr{}, linux.ENOSYS +} diff --git a/vendor/github.com/hugelgupf/p9/p9/fuzz.go b/vendor/github.com/hugelgupf/p9/p9/fuzz.go new file mode 100644 index 000000000..3e7d21790 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/fuzz.go @@ -0,0 +1,27 @@ +//go:build gofuzz +// +build gofuzz + +package p9 + +import ( + "bytes" + + "github.com/u-root/uio/ulog" +) + +func Fuzz(data []byte) int { + buf := bytes.NewBuffer(data) + tag, msg, err := recv(ulog.Null, buf, DefaultMessageSize, msgDotLRegistry.get) + if err != nil { + if msg != nil { + panic("msg !=nil on error") + } + return 0 + } + buf.Reset() + send(ulog.Null, buf, tag, msg) + if err != nil { + panic(err) + } + return 1 +} diff --git a/vendor/github.com/hugelgupf/p9/p9/handlers.go b/vendor/github.com/hugelgupf/p9/p9/handlers.go new file mode 100644 index 000000000..4ee6c616f --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/handlers.go @@ -0,0 +1,1390 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "errors" + "fmt" + "io" + "path" + "strings" + "sync" + "sync/atomic" + + "github.com/hugelgupf/p9/linux" +) + +// newErr returns a new error message from an error. +func newErr(err error) *rlerror { + return &rlerror{Error: uint32(linux.ExtractErrno(err))} +} + +// handler is implemented for server-handled messages. 
+// +// See server.go for call information. +type handler interface { + // Handle handles the given message. + // + // This may modify the server state. The handle function must return a + // message which will be sent back to the client. It may be useful to + // use newErr to automatically extract an error message. + handle(cs *connState) message +} + +// handle implements handler.handle. +func (t *tversion) handle(cs *connState) message { + // "If the server does not understand the client's version string, it + // should respond with an Rversion message (not Rerror) with the + // version string the 7 characters "unknown"". + // + // - 9P2000 spec. + // + // Makes sense, since there are two different kinds of errors depending on the version. + unknown := &rversion{ + MSize: 0, + Version: "unknown", + } + if t.MSize == 0 { + return unknown + } + msize := t.MSize + if t.MSize > maximumLength { + msize = maximumLength + } + + reqBaseVersion, reqVersion, ok := parseVersion(t.Version) + if !ok { + return unknown + } + var baseVersion baseVersion + var version uint32 + + switch reqBaseVersion { + case version9P2000, version9P2000U: + return unknown + + case version9P2000L: + baseVersion = reqBaseVersion + // The server cannot support newer versions that it doesn't know about. In this + // case we return EAGAIN to tell the client to try again with a lower version. + if reqVersion > highestSupportedVersion { + version = highestSupportedVersion + } else { + version = reqVersion + } + } + + // From Tversion(9P): "The server may respond with the client’s version + // string, or a version string identifying an earlier defined protocol version". + atomic.StoreUint32(&cs.messageSize, msize) + atomic.StoreUint32(&cs.version, version) + // This is not thread-safe. We're changing this into sessions anyway, + // so who cares. + cs.baseVersion = baseVersion + + // Initial a pool with msize-shaped buffers. 
+ cs.readBufPool = sync.Pool{ + New: func() interface{} { + // These buffers are used for decoding without a payload. + // We need to return a pointer to avoid unnecessary allocations + // (see https://staticcheck.io/docs/checks#SA6002). + b := make([]byte, msize) + return &b + }, + } + // Buffer of zeros. + cs.pristineZeros = make([]byte, msize) + + return &rversion{ + MSize: msize, + Version: versionString(baseVersion, version), + } +} + +// handle implements handler.handle. +func (t *tflush) handle(cs *connState) message { + cs.WaitTag(t.OldTag) + return &rflush{} +} + +// checkSafeName validates the name and returns nil or returns an error. +func checkSafeName(name string) error { + if name != "" && !strings.Contains(name, "/") && name != "." && name != ".." { + return nil + } + return linux.EINVAL +} + +func clunkHandleXattr(cs *connState, t *tclunk) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + if err := ref.safelyRead(func() error { + if ref.pendingXattr.op == xattrCreate { + if len(ref.pendingXattr.buf) != int(ref.pendingXattr.size) { + return linux.EINVAL + } + if ref.pendingXattr.flags == XattrReplace && ref.pendingXattr.size == 0 { + return ref.file.RemoveXattr(ref.pendingXattr.name) + } + return ref.file.SetXattr(ref.pendingXattr.name, ref.pendingXattr.buf, ref.pendingXattr.flags) + } + return nil + }); err != nil { + return newErr(err) + } + return nil +} + +// handle implements handler.handle. +func (t *tclunk) handle(cs *connState) message { + cerr := clunkHandleXattr(cs, t) + + if err := cs.DeleteFID(t.fid); err != nil { + return newErr(err) + } + if cerr != nil { + return cerr + } + return &rclunk{} +} + +// handle implements handler.handle. 
+func (t *tremove) handle(cs *connState) message { + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + // Frustratingly, because we can't be guaranteed that a rename is not + // occurring simultaneously with this removal, we need to acquire the + // global rename lock for this kind of remove operation to ensure that + // ref.parent does not change out from underneath us. + // + // This is why Tremove is a bad idea, and clients should generally use + // Tunlinkat. All p9 clients will use Tunlinkat. + err := ref.safelyGlobal(func() error { + // Is this a root? Can't remove that. + if ref.isRoot() { + return linux.EINVAL + } + + // N.B. this remove operation is permitted, even if the file is open. + // See also rename below for reasoning. + + // Is this file already deleted? + if ref.isDeleted() { + return linux.EINVAL + } + + // Retrieve the file's proper name. + name := ref.parent.pathNode.nameFor(ref) + + // Attempt the removal. + if err := ref.parent.file.UnlinkAt(name, 0); err != nil { + return err + } + + // Mark all relevant fids as deleted. We don't need to lock any + // individual nodes because we already hold the global lock. + ref.parent.markChildDeleted(name) + return nil + }) + + // "The remove request asks the file server both to remove the file + // represented by fid and to clunk the fid, even if the remove fails." + // + // "It is correct to consider remove to be a clunk with the side effect + // of removing the file if permissions allow." + // https://swtch.com/plan9port/man/man9/remove.html + if fidErr := cs.DeleteFID(t.fid); fidErr != nil { + return newErr(fidErr) + } + if err != nil { + return newErr(err) + } + + return &rremove{} +} + +// handle implements handler.handle. +// +// We don't support authentication, so this just returns ENOSYS. +func (t *tauth) handle(cs *connState) message { + return newErr(linux.ENOSYS) +} + +// handle implements handler.handle. 
+func (t *tattach) handle(cs *connState) message { + // Ensure no authentication fid is provided. + if t.Auth.Authenticationfid != noFID { + return newErr(linux.EINVAL) + } + + // Must provide an absolute path. + if path.IsAbs(t.Auth.AttachName) { + // Trim off the leading / if the path is absolute. We always + // treat attach paths as absolute and call attach with the root + // argument on the server file for clarity. + t.Auth.AttachName = t.Auth.AttachName[1:] + } + + // Do the attach on the root. + sf, err := cs.server.attacher.Attach() + if err != nil { + return newErr(err) + } + qid, valid, attr, err := sf.GetAttr(AttrMaskAll) + if err != nil { + sf.Close() // Drop file. + return newErr(err) + } + if !valid.Mode { + sf.Close() // Drop file. + return newErr(linux.EINVAL) + } + + // Build a transient reference. + root := &fidRef{ + server: cs.server, + parent: nil, + file: sf, + refs: 1, + mode: attr.Mode.FileType(), + pathNode: cs.server.pathTree, + } + defer root.DecRef() + + // Attach the root? + if len(t.Auth.AttachName) == 0 { + cs.InsertFID(t.fid, root) + return &rattach{QID: qid} + } + + // We want the same traversal checks to apply on attach, so always + // attach at the root and use the regular walk paths. + names := strings.Split(t.Auth.AttachName, "/") + _, newRef, _, _, err := doWalk(cs, root, names, false) + if err != nil { + return newErr(err) + } + defer newRef.DecRef() + + // Insert the fid. + cs.InsertFID(t.fid, newRef) + return &rattach{QID: qid} +} + +// CanOpen returns whether this file open can be opened, read and written to. +// +// This includes everything except symlinks and sockets. +func CanOpen(mode FileMode) bool { + return mode.IsRegular() || mode.IsDir() || mode.IsNamedPipe() || mode.IsBlockDevice() || mode.IsCharacterDevice() +} + +// handle implements handler.handle. +func (t *tlopen) handle(cs *connState) message { + // Lookup the fid. 
+ ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + var ( + qid QID + ioUnit uint32 + ) + if err := ref.safelyRead(func() (err error) { + // Has it been deleted already? + if ref.isDeleted() { + return linux.EINVAL + } + + // Has it been opened already? + if ref.opened || !CanOpen(ref.mode) { + return linux.EINVAL + } + + // Is this an attempt to open a directory as writable? Don't accept. + if ref.mode.IsDir() && t.Flags.Mode() != ReadOnly { + return linux.EISDIR + } + + // Do the open. + qid, ioUnit, err = ref.file.Open(t.Flags) + return err + }); err != nil { + return newErr(err) + } + + // Mark file as opened and set open mode. + ref.opened = true + ref.openFlags = t.Flags + + return &rlopen{QID: qid, IoUnit: ioUnit} +} + +func (t *tlcreate) do(cs *connState, uid UID) (*rlcreate, error) { + // Don't allow complex names. + if err := checkSafeName(t.Name); err != nil { + return nil, err + } + + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return nil, linux.EBADF + } + defer ref.DecRef() + + var ( + nsf File + qid QID + ioUnit uint32 + newRef *fidRef + ) + if err := ref.safelyWrite(func() (err error) { + // Don't allow creation from non-directories or deleted directories. + if ref.isDeleted() || !ref.mode.IsDir() { + return linux.EINVAL + } + + // Not allowed on open directories. + if ref.opened { + return linux.EINVAL + } + + // Do the create. + nsf, qid, ioUnit, err = ref.file.Create(t.Name, t.OpenFlags, t.Permissions, uid, t.GID) + if err != nil { + return err + } + + newRef = &fidRef{ + server: cs.server, + parent: ref, + file: nsf, + opened: true, + openFlags: t.OpenFlags, + mode: ModeRegular, + pathNode: ref.pathNode.pathNodeFor(t.Name), + } + ref.pathNode.addChild(newRef, t.Name) + ref.IncRef() // Acquire parent reference. + return nil + }); err != nil { + return nil, err + } + + // Replace the fid reference. 
+ cs.InsertFID(t.fid, newRef) + + return &rlcreate{rlopen: rlopen{QID: qid, IoUnit: ioUnit}}, nil +} + +// handle implements handler.handle. +func (t *tlcreate) handle(cs *connState) message { + rlcreate, err := t.do(cs, NoUID) + if err != nil { + return newErr(err) + } + return rlcreate +} + +// handle implements handler.handle. +func (t *tsymlink) handle(cs *connState) message { + rsymlink, err := t.do(cs, NoUID) + if err != nil { + return newErr(err) + } + return rsymlink +} + +func (t *tsymlink) do(cs *connState, uid UID) (*rsymlink, error) { + // Don't allow complex names. + if err := checkSafeName(t.Name); err != nil { + return nil, err + } + + // Lookup the fid. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return nil, linux.EBADF + } + defer ref.DecRef() + + var qid QID + if err := ref.safelyWrite(func() (err error) { + // Don't allow symlinks from non-directories or deleted directories. + if ref.isDeleted() || !ref.mode.IsDir() { + return linux.EINVAL + } + + // Not allowed on open directories. + if ref.opened { + return linux.EINVAL + } + + // Do the symlink. + qid, err = ref.file.Symlink(t.Target, t.Name, uid, t.GID) + return err + }); err != nil { + return nil, err + } + + return &rsymlink{QID: qid}, nil +} + +// handle implements handler.handle. +func (t *tlink) handle(cs *connState) message { + // Don't allow complex names. + if err := checkSafeName(t.Name); err != nil { + return newErr(err) + } + + // Lookup the fid. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + // Lookup the other fid. + refTarget, ok := cs.LookupFID(t.Target) + if !ok { + return newErr(linux.EBADF) + } + defer refTarget.DecRef() + + if err := ref.safelyWrite(func() (err error) { + // Don't allow create links from non-directories or deleted directories. + if ref.isDeleted() || !ref.mode.IsDir() { + return linux.EINVAL + } + + // Not allowed on open directories. 
+ if ref.opened { + return linux.EINVAL + } + + // Do the link. + return ref.file.Link(refTarget.file, t.Name) + }); err != nil { + return newErr(err) + } + + return &rlink{} +} + +// handle implements handler.handle. +func (t *trenameat) handle(cs *connState) message { + // Don't allow complex names. + if err := checkSafeName(t.OldName); err != nil { + return newErr(err) + } + if err := checkSafeName(t.NewName); err != nil { + return newErr(err) + } + + // Lookup the fid. + ref, ok := cs.LookupFID(t.OldDirectory) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + // Lookup the other fid. + refTarget, ok := cs.LookupFID(t.NewDirectory) + if !ok { + return newErr(linux.EBADF) + } + defer refTarget.DecRef() + + // Perform the rename holding the global lock. + if err := ref.safelyGlobal(func() (err error) { + // Don't allow renaming across deleted directories. + if ref.isDeleted() || !ref.mode.IsDir() || refTarget.isDeleted() || !refTarget.mode.IsDir() { + return linux.EINVAL + } + + // Not allowed on open directories. + if ref.opened { + return linux.EINVAL + } + + // Is this the same file? If yes, short-circuit and return success. + if ref.pathNode == refTarget.pathNode && t.OldName == t.NewName { + return nil + } + + // Attempt the actual rename. + if err := ref.file.RenameAt(t.OldName, refTarget.file, t.NewName); err != nil { + return err + } + + // Update the path tree. + ref.renameChildTo(t.OldName, refTarget, t.NewName) + return nil + }); err != nil { + return newErr(err) + } + + return &rrenameat{} +} + +// handle implements handler.handle. +func (t *tunlinkat) handle(cs *connState) message { + // Don't allow complex names. + if err := checkSafeName(t.Name); err != nil { + return newErr(err) + } + + // Lookup the fid. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + if err := ref.safelyWrite(func() (err error) { + // Don't allow deletion from non-directories or deleted directories. 
+ if ref.isDeleted() || !ref.mode.IsDir() { + return linux.EINVAL + } + + // Not allowed on open directories. + if ref.opened { + return linux.EINVAL + } + + // Before we do the unlink itself, we need to ensure that there + // are no operations in flight on associated path node. The + // child's path node lock must be held to ensure that the + // unlinkat marking the child deleted below is atomic with + // respect to any other read or write operations. + // + // This is one case where we have a lock ordering issue, but + // since we always acquire deeper in the hierarchy, we know + // that we are free of lock cycles. + childPathNode := ref.pathNode.pathNodeFor(t.Name) + childPathNode.opMu.Lock() + defer childPathNode.opMu.Unlock() + + // Do the unlink. + err = ref.file.UnlinkAt(t.Name, t.Flags) + if err != nil { + return err + } + + // Mark the path as deleted. + ref.markChildDeleted(t.Name) + return nil + }); err != nil { + return newErr(err) + } + + return &runlinkat{} +} + +// handle implements handler.handle. +func (t *trename) handle(cs *connState) message { + // Don't allow complex names. + if err := checkSafeName(t.Name); err != nil { + return newErr(err) + } + + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + // Lookup the target. + refTarget, ok := cs.LookupFID(t.Directory) + if !ok { + return newErr(linux.EBADF) + } + defer refTarget.DecRef() + + if err := ref.safelyGlobal(func() (err error) { + // Don't allow a root rename. + if ref.isRoot() { + return linux.EINVAL + } + + // Don't allow renaming deleting entries, or target non-directories. + if ref.isDeleted() || refTarget.isDeleted() || !refTarget.mode.IsDir() { + return linux.EINVAL + } + + // If the parent is deleted, but we not, something is seriously wrong. + // It's fail to die at this point with an assertion failure. 
+ if ref.parent.isDeleted() { + panic(fmt.Sprintf("parent %+v deleted, child %+v is not", ref.parent, ref)) + } + + // N.B. The rename operation is allowed to proceed on open files. It + // does impact the state of its parent, but this is merely a sanity + // check in any case, and the operation is safe. There may be other + // files corresponding to the same path that are renamed anyways. + + // Check for the exact same file and short-circuit. + oldName := ref.parent.pathNode.nameFor(ref) + if ref.parent.pathNode == refTarget.pathNode && oldName == t.Name { + return nil + } + + // Call the rename method on the parent. + if err := ref.parent.file.RenameAt(oldName, refTarget.file, t.Name); err != nil { + return err + } + + // Update the path tree. + ref.parent.renameChildTo(oldName, refTarget, t.Name) + return nil + }); err != nil { + return newErr(err) + } + + return &rrename{} +} + +// handle implements handler.handle. +func (t *treadlink) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + var target string + if err := ref.safelyRead(func() (err error) { + // Don't allow readlink on deleted files. There is no need to + // check if this file is opened because symlinks cannot be + // opened. + if ref.isDeleted() || !ref.mode.IsSymlink() { + return linux.EINVAL + } + + // Do the read. + target, err = ref.file.Readlink() + return err + }); err != nil { + return newErr(err) + } + + return &rreadlink{target} +} + +// handle implements handler.handle. +func (t *tread) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + // Constrain the size of the read buffer. + if int(t.Count) > int(maximumLength) { + return newErr(linux.ENOBUFS) + } + + var n int + data := cs.readBufPool.Get().(*[]byte) + // Retain a reference to the full length of the buffer. 
+ dataBuf := (*data) + if err := ref.safelyRead(func() (err error) { + switch ref.pendingXattr.op { + case xattrNone: + // Has it been opened already? + if !ref.opened { + return linux.EINVAL + } + + // Can it be read? Check permissions. + if ref.openFlags&OpenFlagsModeMask == WriteOnly { + return linux.EPERM + } + + n, err = ref.file.ReadAt(dataBuf[:t.Count], int64(t.Offset)) + return err + + case xattrWalk: + // Make sure we do not pass an empty buffer to GetXattr or ListXattrs. + // Both of them will return the required buffer length if + // the input buffer has length 0. + // tread means the caller already knows the required buffer length + // and wants to get the attribute value. + if t.Count == 0 { + if ref.pendingXattr.size == 0 { + // the provided buffer has length 0 and + // the attribute value is also empty. + return nil + } + // buffer too small. + return linux.EINVAL + } + + if t.Offset+uint64(t.Count) > uint64(len(ref.pendingXattr.buf)) { + return linux.EINVAL + } + + n = copy(dataBuf[:t.Count], ref.pendingXattr.buf[t.Offset:]) + return nil + default: + return linux.EINVAL + } + }); err != nil && !errors.Is(err, io.EOF) { + return newErr(err) + } + + return &rreadServerPayloader{ + rread: rread{ + Data: dataBuf[:n], + }, + cs: cs, + fullBuffer: dataBuf, + } +} + +// handle implements handler.handle. +func (t *twrite) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + var n int + if err := ref.safelyRead(func() (err error) { + switch ref.pendingXattr.op { + case xattrNone: + // Has it been opened already? + if !ref.opened { + return linux.EINVAL + } + + // Can it be written? Check permissions. 
+ if ref.openFlags&OpenFlagsModeMask == ReadOnly { + return linux.EPERM + } + + n, err = ref.file.WriteAt(t.Data, int64(t.Offset)) + + case xattrCreate: + if uint64(len(ref.pendingXattr.buf)) != t.Offset { + return linux.EINVAL + } + if t.Offset+uint64(len(t.Data)) > ref.pendingXattr.size { + return linux.EINVAL + } + ref.pendingXattr.buf = append(ref.pendingXattr.buf, t.Data...) + n = len(t.Data) + + default: + return linux.EINVAL + } + return err + }); err != nil { + return newErr(err) + } + + return &rwrite{Count: uint32(n)} +} + +// handle implements handler.handle. +func (t *tmknod) handle(cs *connState) message { + rmknod, err := t.do(cs, NoUID) + if err != nil { + return newErr(err) + } + return rmknod +} + +func (t *tmknod) do(cs *connState, uid UID) (*rmknod, error) { + // Don't allow complex names. + if err := checkSafeName(t.Name); err != nil { + return nil, err + } + + // Lookup the fid. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return nil, linux.EBADF + } + defer ref.DecRef() + + var qid QID + if err := ref.safelyWrite(func() (err error) { + // Don't allow mknod on deleted files. + if ref.isDeleted() || !ref.mode.IsDir() { + return linux.EINVAL + } + + // Not allowed on open directories. + if ref.opened { + return linux.EINVAL + } + + // Do the mknod. + qid, err = ref.file.Mknod(t.Name, t.Mode, t.Major, t.Minor, uid, t.GID) + return err + }); err != nil { + return nil, err + } + + return &rmknod{QID: qid}, nil +} + +// handle implements handler.handle. +func (t *tmkdir) handle(cs *connState) message { + rmkdir, err := t.do(cs, NoUID) + if err != nil { + return newErr(err) + } + return rmkdir +} + +func (t *tmkdir) do(cs *connState, uid UID) (*rmkdir, error) { + // Don't allow complex names. + if err := checkSafeName(t.Name); err != nil { + return nil, err + } + + // Lookup the fid. 
+ ref, ok := cs.LookupFID(t.Directory) + if !ok { + return nil, linux.EBADF + } + defer ref.DecRef() + + var qid QID + if err := ref.safelyWrite(func() (err error) { + // Don't allow mkdir on deleted files. + if ref.isDeleted() || !ref.mode.IsDir() { + return linux.EINVAL + } + + // Not allowed on open directories. + if ref.opened { + return linux.EINVAL + } + + // Do the mkdir. + qid, err = ref.file.Mkdir(t.Name, t.Permissions, uid, t.GID) + return err + }); err != nil { + return nil, err + } + + return &rmkdir{QID: qid}, nil +} + +// handle implements handler.handle. +func (t *tgetattr) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + // We allow getattr on deleted files. Depending on the backing + // implementation, it's possible that races exist that might allow + // fetching attributes of other files. But we need to generally allow + // refreshing attributes and this is a minor leak, if at all. + + var ( + qid QID + valid AttrMask + attr Attr + ) + if err := ref.safelyRead(func() (err error) { + qid, valid, attr, err = ref.file.GetAttr(t.AttrMask) + return err + }); err != nil { + return newErr(err) + } + + return &rgetattr{QID: qid, Valid: valid, Attr: attr} +} + +// handle implements handler.handle. +func (t *tsetattr) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + if err := ref.safelyWrite(func() error { + // We don't allow setattr on files that have been deleted. + // This might be technically incorrect, as it's possible that + // there were multiple links and you can still change the + // corresponding inode information. + if ref.isDeleted() { + return linux.EINVAL + } + + // Set the attributes. 
+ return ref.file.SetAttr(t.Valid, t.SetAttr) + }); err != nil { + return newErr(err) + } + + return &rsetattr{} +} + +// handle implements handler.handle. +func (t *txattrwalk) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + size := 0 + if err := ref.safelyRead(func() error { + if ref.isDeleted() { + return linux.EINVAL + } + var buf []byte + var err error + if len(t.Name) > 0 { + buf, err = ref.file.GetXattr(t.Name) + } else { + var xattrs []string + xattrs, err = ref.file.ListXattrs() + if err == nil { + buf = []byte(strings.Join(xattrs, "\000") + "\000") + } + } + if err != nil || uint32(len(buf)) > maximumLength { + return linux.EINVAL + } + size = len(buf) + newRef := &fidRef{ + server: cs.server, + file: ref.file, + pendingXattr: pendingXattr{ + op: xattrWalk, + name: t.Name, + size: uint64(size), + buf: buf, + }, + pathNode: ref.pathNode, + parent: ref.parent, + } + cs.InsertFID(t.newFID, newRef) + return nil + }); err != nil { + return newErr(err) + } + return &rxattrwalk{Size: uint64(size)} +} + +// handle implements handler.handle. +func (t *txattrcreate) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + if err := ref.safelyWrite(func() error { + if ref.isDeleted() { + return linux.EINVAL + } + ref.pendingXattr = pendingXattr{ + op: xattrCreate, + name: t.Name, + size: t.AttrSize, + flags: XattrFlags(t.Flags), + } + return nil + }); err != nil { + return newErr(err) + } + return &rxattrcreate{} +} + +// handle implements handler.handle. +func (t *treaddir) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + var entries []Dirent + if err := ref.safelyRead(func() (err error) { + // Don't allow reading deleted directories. 
+ if ref.isDeleted() || !ref.mode.IsDir() { + return linux.EINVAL + } + + // Has it been opened already? + if !ref.opened { + return linux.EINVAL + } + + // Read the entries. + entries, err = ref.file.Readdir(t.Offset, t.Count) + if err != nil && !errors.Is(err, io.EOF) { + return err + } + return nil + }); err != nil { + return newErr(err) + } + + return &rreaddir{Count: t.Count, Entries: entries} +} + +// handle implements handler.handle. +func (t *tfsync) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + if err := ref.safelyRead(func() (err error) { + // Has it been opened already? + if !ref.opened { + return linux.EINVAL + } + + // Perform the sync. + return ref.file.FSync() + }); err != nil { + return newErr(err) + } + + return &rfsync{} +} + +// handle implements handler.handle. +func (t *tstatfs) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + st, err := ref.file.StatFS() + if err != nil { + return newErr(err) + } + + return &rstatfs{st} +} + +// handle implements handler.handle. +func (t *tlock) handle(cs *connState) message { + // Lookup the fid. + ref, ok := cs.LookupFID(t.fid) + if !ok { + return newErr(linux.EBADF) + } + defer ref.DecRef() + + status, err := ref.file.Lock(int(t.PID), t.Type, t.Flags, t.Start, t.Length, t.Client) + if err != nil { + return newErr(err) + } + return &rlock{Status: status} +} + +// walkOne walks zero or one path elements. +// +// The slice passed as qids is append and returned. +func walkOne(qids []QID, from File, names []string, getattr bool) ([]QID, File, AttrMask, Attr, error) { + nwname := len(names) + if nwname > 1 { + // We require exactly zero or one elements. 
+ return nil, nil, AttrMask{}, Attr{}, linux.EINVAL + } + var ( + localQIDs []QID + sf File + valid AttrMask + attr Attr + err error + ) + switch { + case getattr: + localQIDs, sf, valid, attr, err = from.WalkGetAttr(names) + // Can't put fallthrough in the if because Go. + if !errors.Is(err, linux.ENOSYS) { + break + } + fallthrough + default: + localQIDs, sf, err = from.Walk(names) + if err != nil { + // No way to walk this element. + break + } + if getattr { + _, valid, attr, err = sf.GetAttr(AttrMaskAll) + if err != nil { + // Don't leak the file. + sf.Close() + } + } + } + if err != nil { + // Error walking, don't return anything. + return nil, nil, AttrMask{}, Attr{}, err + } + if nwname == 1 && len(localQIDs) != 1 { + // Expected a single QID. + sf.Close() + return nil, nil, AttrMask{}, Attr{}, linux.EINVAL + } + return append(qids, localQIDs...), sf, valid, attr, nil +} + +// doWalk walks from a given fidRef. +// +// This enforces that all intermediate nodes are walkable (directories). The +// fidRef returned (newRef) has a reference associated with it that is now +// owned by the caller and must be handled appropriately. +func doWalk(cs *connState, ref *fidRef, names []string, getattr bool) (qids []QID, newRef *fidRef, valid AttrMask, attr Attr, err error) { + // Check the names. + for _, name := range names { + err = checkSafeName(name) + if err != nil { + return + } + } + + // validate anything since this is always permitted. + if len(names) == 0 { + var sf File // Temporary. + if err := ref.maybeParent().safelyRead(func() (err error) { + // Clone the single element. + qids, sf, valid, attr, err = walkOne(nil, ref.file, nil, getattr) + if err != nil { + return err + } + + newRef = &fidRef{ + server: cs.server, + parent: ref.parent, + file: sf, + mode: ref.mode, + pathNode: ref.pathNode, + } + if !ref.isRoot() { + if !newRef.isDeleted() { + // Add only if a non-root node; the same node. 
+ ref.parent.pathNode.addChild(newRef, ref.parent.pathNode.nameFor(ref)) + } + ref.parent.IncRef() // Acquire parent reference. + } + // doWalk returns a reference. + newRef.IncRef() + return nil + }); err != nil { + return nil, nil, AttrMask{}, Attr{}, err + } + + // Do not return the new QID. + // walk(5) "nwqid will always be less than or equal to nwname" + return nil, newRef, valid, attr, nil + } + + // Do the walk, one element at a time. + walkRef := ref + walkRef.IncRef() + for i := 0; i < len(names); i++ { + // We won't allow beyond past symlinks; stop here if this isn't + // a proper directory and we have additional paths to walk. + if !walkRef.mode.IsDir() { + walkRef.DecRef() // Drop walk reference; no lock required. + return nil, nil, AttrMask{}, Attr{}, linux.EINVAL + } + + var sf File // Temporary. + if err := walkRef.safelyRead(func() (err error) { + // It is not safe to walk on a deleted directory. It + // could have been replaced with a malicious symlink. + if walkRef.isDeleted() { + // Fail this operation as the result will not + // be meaningful if walkRef is deleted. + return linux.ENOENT + } + + // Pass getattr = true to walkOne since we need the file type for + // newRef. + qids, sf, valid, attr, err = walkOne(qids, walkRef.file, names[i:i+1], true) + if err != nil { + return err + } + + // Note that we don't need to acquire a lock on any of + // these individual instances. That's because they are + // not actually addressable via a fid. They are + // anonymous. They exist in the tree for tracking + // purposes. + newRef := &fidRef{ + server: cs.server, + parent: walkRef, + file: sf, + mode: attr.Mode.FileType(), + pathNode: walkRef.pathNode.pathNodeFor(names[i]), + } + walkRef.pathNode.addChild(newRef, names[i]) + // We allow our walk reference to become the new parent + // reference here and so we don't IncRef. Instead, just + // set walkRef to the newRef above and acquire a new + // walk reference. 
+			walkRef = newRef
+			walkRef.IncRef()
+			return nil
+		}); err != nil {
+			walkRef.DecRef() // Drop the old walkRef.
+			return nil, nil, AttrMask{}, Attr{}, err
+		}
+	}
+
+	// Success.
+	return qids, walkRef, valid, attr, nil
+}
+
+// handle implements handler.handle.
+func (t *twalk) handle(cs *connState) message {
+	// Lookup the fid.
+	ref, ok := cs.LookupFID(t.fid)
+	if !ok {
+		return newErr(linux.EBADF)
+	}
+	defer ref.DecRef()
+
+	if err := ref.safelyRead(func() error {
+		// Has it been opened already?
+		//
+		// That is OK as long as newFID is different. Note this
+		// violates the spec, but the Linux client does too, so we have
+		// little choice.
+		if ref.opened && t.fid == t.newFID {
+			return linux.EBUSY
+		}
+		return nil
+	}); err != nil {
+		return newErr(err)
+	}
+
+	// Is this an empty list? Handle specially. We don't actually need to
+	// do the walk.
+	qids, newRef, _, _, err := doWalk(cs, ref, t.Names, false)
+	if err != nil {
+		return newErr(err)
+	}
+	defer newRef.DecRef()
+
+	// Install the new fid.
+	cs.InsertFID(t.newFID, newRef)
+	return &rwalk{QIDs: qids}
+}
+
+// handle implements handler.handle.
+func (t *twalkgetattr) handle(cs *connState) message {
+	// Lookup the fid.
+	ref, ok := cs.LookupFID(t.fid)
+	if !ok {
+		return newErr(linux.EBADF)
+	}
+	defer ref.DecRef()
+
+	if err := ref.safelyRead(func() error {
+		// Has it been opened already?
+		//
+		// That is OK as long as newFID is different. Note this
+		// violates the spec, but the Linux client does too, so we have
+		// little choice.
+		if ref.opened && t.fid == t.newFID {
+			return linux.EBUSY
+		}
+		return nil
+	}); err != nil {
+		return newErr(err)
+	}
+
+	// Is this an empty list? Handle specially. We don't actually need to
+	// do the walk.
+	qids, newRef, valid, attr, err := doWalk(cs, ref, t.Names, true)
+	if err != nil {
+		return newErr(err)
+	}
+	defer newRef.DecRef()
+
+	// Install the new fid.
+ cs.InsertFID(t.newFID, newRef) + return &rwalkgetattr{QIDs: qids, Valid: valid, Attr: attr} +} + +// handle implements handler.handle. +func (t *tucreate) handle(cs *connState) message { + rlcreate, err := t.tlcreate.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &rucreate{*rlcreate} +} + +// handle implements handler.handle. +func (t *tumkdir) handle(cs *connState) message { + rmkdir, err := t.tmkdir.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &rumkdir{*rmkdir} +} + +// handle implements handler.handle. +func (t *tusymlink) handle(cs *connState) message { + rsymlink, err := t.tsymlink.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &rusymlink{*rsymlink} +} + +// handle implements handler.handle. +func (t *tumknod) handle(cs *connState) message { + rmknod, err := t.tmknod.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &rumknod{*rmknod} +} diff --git a/vendor/github.com/hugelgupf/p9/p9/messages.go b/vendor/github.com/hugelgupf/p9/p9/messages.go new file mode 100644 index 000000000..54feafd46 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/messages.go @@ -0,0 +1,2348 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "fmt" + "math" +) + +// ErrInvalidMsgType is returned when an unsupported message type is found. +type ErrInvalidMsgType struct { + msgType +} + +// Error returns a useful string. 
+func (e *ErrInvalidMsgType) Error() string { + return fmt.Sprintf("invalid message type: %d", e.msgType) +} + +// message is a generic 9P message. +type message interface { + encoder + fmt.Stringer + + // Type returns the message type number. + typ() msgType +} + +// payloader is a special message which may include an inline payload. +type payloader interface { + // FixedSize returns the size of the fixed portion of this message. + FixedSize() uint32 + + // Payload returns the payload for sending. + Payload() []byte + + // SetPayload returns the decoded message. + // + // This is going to be total message size - FixedSize. But this should + // be validated during decode, which will be called after SetPayload. + SetPayload([]byte) + + // PayloadCleanup is called after a payloader message is sent and + // buffers can be reapt. + PayloadCleanup() +} + +// tversion is a version request. +type tversion struct { + // MSize is the message size to use. + MSize uint32 + + // Version is the version string. + // + // For this implementation, this must be 9P2000.L. + Version string +} + +// decode implements encoder.decode. +func (t *tversion) decode(b *buffer) { + t.MSize = b.Read32() + t.Version = b.ReadString() +} + +// encode implements encoder.encode. +func (t *tversion) encode(b *buffer) { + b.Write32(t.MSize) + b.WriteString(t.Version) +} + +// typ implements message.typ. +func (*tversion) typ() msgType { + return msgTversion +} + +// String implements fmt.Stringer. +func (t *tversion) String() string { + return fmt.Sprintf("Tversion{MSize: %d, Version: %s}", t.MSize, t.Version) +} + +// rversion is a version response. +type rversion struct { + // MSize is the negotiated size. + MSize uint32 + + // Version is the negotiated version. + Version string +} + +// decode implements encoder.decode. +func (r *rversion) decode(b *buffer) { + r.MSize = b.Read32() + r.Version = b.ReadString() +} + +// encode implements encoder.encode. 
+func (r *rversion) encode(b *buffer) { + b.Write32(r.MSize) + b.WriteString(r.Version) +} + +// typ implements message.typ. +func (*rversion) typ() msgType { + return msgRversion +} + +// String implements fmt.Stringer. +func (r *rversion) String() string { + return fmt.Sprintf("Rversion{MSize: %d, Version: %s}", r.MSize, r.Version) +} + +// tflush is a flush request. +type tflush struct { + // OldTag is the tag to wait on. + OldTag tag +} + +// decode implements encoder.decode. +func (t *tflush) decode(b *buffer) { + t.OldTag = b.ReadTag() +} + +// encode implements encoder.encode. +func (t *tflush) encode(b *buffer) { + b.WriteTag(t.OldTag) +} + +// typ implements message.typ. +func (*tflush) typ() msgType { + return msgTflush +} + +// String implements fmt.Stringer. +func (t *tflush) String() string { + return fmt.Sprintf("Tflush{OldTag: %d}", t.OldTag) +} + +// rflush is a flush response. +type rflush struct { +} + +// decode implements encoder.decode. +func (*rflush) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*rflush) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rflush) typ() msgType { + return msgRflush +} + +// String implements fmt.Stringer. +func (r *rflush) String() string { + return fmt.Sprintf("Rflush{}") +} + +// twalk is a walk request. +type twalk struct { + // fid is the fid to be walked. + fid fid + + // newFID is the resulting fid. + newFID fid + + // Names are the set of names to be walked. + Names []string +} + +// decode implements encoder.decode. +func (t *twalk) decode(b *buffer) { + t.fid = b.ReadFID() + t.newFID = b.ReadFID() + n := b.Read16() + t.Names = t.Names[:0] + for i := 0; i < int(n); i++ { + t.Names = append(t.Names, b.ReadString()) + } +} + +// encode implements encoder.encode. +func (t *twalk) encode(b *buffer) { + b.WriteFID(t.fid) + b.WriteFID(t.newFID) + b.Write16(uint16(len(t.Names))) + for _, name := range t.Names { + b.WriteString(name) + } +} + +// typ implements message.typ. 
+func (*twalk) typ() msgType { + return msgTwalk +} + +// String implements fmt.Stringer. +func (t *twalk) String() string { + return fmt.Sprintf("Twalk{FID: %d, newFID: %d, Names: %v}", t.fid, t.newFID, t.Names) +} + +// rwalk is a walk response. +type rwalk struct { + // QIDs are the set of QIDs returned. + QIDs []QID +} + +// decode implements encoder.decode. +func (r *rwalk) decode(b *buffer) { + n := b.Read16() + r.QIDs = r.QIDs[:0] + for i := 0; i < int(n); i++ { + var q QID + q.decode(b) + r.QIDs = append(r.QIDs, q) + } +} + +// encode implements encoder.encode. +func (r *rwalk) encode(b *buffer) { + b.Write16(uint16(len(r.QIDs))) + for _, q := range r.QIDs { + q.encode(b) + } +} + +// typ implements message.typ. +func (*rwalk) typ() msgType { + return msgRwalk +} + +// String implements fmt.Stringer. +func (r *rwalk) String() string { + return fmt.Sprintf("Rwalk{QIDs: %v}", r.QIDs) +} + +// tclunk is a close request. +type tclunk struct { + // fid is the fid to be closed. + fid fid +} + +// decode implements encoder.decode. +func (t *tclunk) decode(b *buffer) { + t.fid = b.ReadFID() +} + +// encode implements encoder.encode. +func (t *tclunk) encode(b *buffer) { + b.WriteFID(t.fid) +} + +// typ implements message.typ. +func (*tclunk) typ() msgType { + return msgTclunk +} + +// String implements fmt.Stringer. +func (t *tclunk) String() string { + return fmt.Sprintf("Tclunk{FID: %d}", t.fid) +} + +// rclunk is a close response. +type rclunk struct{} + +// decode implements encoder.decode. +func (*rclunk) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*rclunk) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rclunk) typ() msgType { + return msgRclunk +} + +// String implements fmt.Stringer. +func (r *rclunk) String() string { + return fmt.Sprintf("Rclunk{}") +} + +// tremove is a remove request. +type tremove struct { + // fid is the fid to be removed. + fid fid +} + +// decode implements encoder.decode. 
+func (t *tremove) decode(b *buffer) { + t.fid = b.ReadFID() +} + +// encode implements encoder.encode. +func (t *tremove) encode(b *buffer) { + b.WriteFID(t.fid) +} + +// typ implements message.typ. +func (*tremove) typ() msgType { + return msgTremove +} + +// String implements fmt.Stringer. +func (t *tremove) String() string { + return fmt.Sprintf("Tremove{FID: %d}", t.fid) +} + +// rremove is a remove response. +type rremove struct { +} + +// decode implements encoder.decode. +func (*rremove) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*rremove) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rremove) typ() msgType { + return msgRremove +} + +// String implements fmt.Stringer. +func (r *rremove) String() string { + return fmt.Sprintf("Rremove{}") +} + +// rlerror is an error response. +// +// Note that this replaces the error code used in 9p. +type rlerror struct { + Error uint32 +} + +// decode implements encoder.decode. +func (r *rlerror) decode(b *buffer) { + r.Error = b.Read32() +} + +// encode implements encoder.encode. +func (r *rlerror) encode(b *buffer) { + b.Write32(r.Error) +} + +// typ implements message.typ. +func (*rlerror) typ() msgType { + return msgRlerror +} + +// String implements fmt.Stringer. +func (r *rlerror) String() string { + return fmt.Sprintf("Rlerror{Error: %d}", r.Error) +} + +// tauth is an authentication request. +type tauth struct { + // Authenticationfid is the fid to attach the authentication result. + Authenticationfid fid + + // UserName is the user to attach. + UserName string + + // AttachName is the attach name. + AttachName string + + // UserID is the numeric identifier for UserName. + UID UID +} + +// decode implements encoder.decode. +func (t *tauth) decode(b *buffer) { + t.Authenticationfid = b.ReadFID() + t.UserName = b.ReadString() + t.AttachName = b.ReadString() + t.UID = b.ReadUID() +} + +// encode implements encoder.encode. 
+func (t *tauth) encode(b *buffer) {
+	b.WriteFID(t.Authenticationfid)
+	b.WriteString(t.UserName)
+	b.WriteString(t.AttachName)
+	b.WriteUID(t.UID)
+}
+
+// typ implements message.typ.
+func (*tauth) typ() msgType {
+	return msgTauth
+}
+
+// String implements fmt.Stringer.
+//
+// NOTE(review): closing brace added to the format string — it was
+// "Tauth{... UID: %d" (unterminated), unlike every other Stringer here.
+func (t *tauth) String() string {
+	return fmt.Sprintf("Tauth{AuthFID: %d, UserName: %s, AttachName: %s, UID: %d}", t.Authenticationfid, t.UserName, t.AttachName, t.UID)
+}
+
+// rauth is an authentication response.
+//
+// encode, decode and Length are inherited directly from QID.
+type rauth struct {
+	QID
+}
+
+// typ implements message.typ.
+func (*rauth) typ() msgType {
+	return msgRauth
+}
+
+// String implements fmt.Stringer.
+func (r *rauth) String() string {
+	return fmt.Sprintf("Rauth{QID: %s}", r.QID)
+}
+
+// tattach is an attach request.
+type tattach struct {
+	// fid is the fid to be attached.
+	fid fid
+
+	// Auth is the embedded authentication request.
+	//
+	// See client.Attach for information regarding authentication.
+	Auth tauth
+}
+
+// decode implements encoder.decode.
+func (t *tattach) decode(b *buffer) {
+	t.fid = b.ReadFID()
+	t.Auth.decode(b)
+}
+
+// encode implements encoder.encode.
+func (t *tattach) encode(b *buffer) {
+	b.WriteFID(t.fid)
+	t.Auth.encode(b)
+}
+
+// typ implements message.typ.
+func (*tattach) typ() msgType {
+	return msgTattach
+}
+
+// String implements fmt.Stringer.
+func (t *tattach) String() string {
+	return fmt.Sprintf("Tattach{FID: %d, AuthFID: %d, UserName: %s, AttachName: %s, UID: %d}", t.fid, t.Auth.Authenticationfid, t.Auth.UserName, t.Auth.AttachName, t.Auth.UID)
+}
+
+// rattach is an attach response.
+type rattach struct {
+	QID
+}
+
+// typ implements message.typ.
+func (*rattach) typ() msgType {
+	return msgRattach
+}
+
+// String implements fmt.Stringer.
+func (r *rattach) String() string {
+	return fmt.Sprintf("Rattach{QID: %s}", r.QID)
+}
+
+// tlopen is an open request.
+type tlopen struct {
+	// fid is the fid to be opened.
+ fid fid + + // Flags are the open flags. + Flags OpenFlags +} + +// decode implements encoder.decode. +func (t *tlopen) decode(b *buffer) { + t.fid = b.ReadFID() + t.Flags = b.ReadOpenFlags() +} + +// encode implements encoder.encode. +func (t *tlopen) encode(b *buffer) { + b.WriteFID(t.fid) + b.WriteOpenFlags(t.Flags) +} + +// typ implements message.typ. +func (*tlopen) typ() msgType { + return msgTlopen +} + +// String implements fmt.Stringer. +func (t *tlopen) String() string { + return fmt.Sprintf("Tlopen{FID: %d, Flags: %v}", t.fid, t.Flags) +} + +// rlopen is a open response. +type rlopen struct { + // QID is the file's QID. + QID QID + + // IoUnit is the recommended I/O unit. + IoUnit uint32 +} + +// decode implements encoder.decode. +func (r *rlopen) decode(b *buffer) { + r.QID.decode(b) + r.IoUnit = b.Read32() +} + +// encode implements encoder.encode. +func (r *rlopen) encode(b *buffer) { + r.QID.encode(b) + b.Write32(r.IoUnit) +} + +// typ implements message.typ. +func (*rlopen) typ() msgType { + return msgRlopen +} + +// String implements fmt.Stringer. +func (r *rlopen) String() string { + return fmt.Sprintf("Rlopen{QID: %s, IoUnit: %d}", r.QID, r.IoUnit) +} + +// tlcreate is a create request. +type tlcreate struct { + // fid is the parent fid. + // + // This becomes the new file. + fid fid + + // Name is the file name to create. + Name string + + // Mode is the open mode (O_RDWR, etc.). + // + // Note that flags like O_TRUNC are ignored, as is O_EXCL. All + // create operations are exclusive. + OpenFlags OpenFlags + + // Permissions is the set of permission bits. + Permissions FileMode + + // GID is the group ID to use for creating the file. + GID GID +} + +// decode implements encoder.decode. +func (t *tlcreate) decode(b *buffer) { + t.fid = b.ReadFID() + t.Name = b.ReadString() + t.OpenFlags = b.ReadOpenFlags() + t.Permissions = b.ReadPermissions() + t.GID = b.ReadGID() +} + +// encode implements encoder.encode. 
+func (t *tlcreate) encode(b *buffer) {
+	b.WriteFID(t.fid)
+	b.WriteString(t.Name)
+	b.WriteOpenFlags(t.OpenFlags)
+	b.WritePermissions(t.Permissions)
+	b.WriteGID(t.GID)
+}
+
+// typ implements message.typ.
+func (*tlcreate) typ() msgType {
+	return msgTlcreate
+}
+
+// String implements fmt.Stringer.
+func (t *tlcreate) String() string {
+	return fmt.Sprintf("Tlcreate{FID: %d, Name: %s, OpenFlags: %s, Permissions: 0o%o, GID: %d}", t.fid, t.Name, t.OpenFlags, t.Permissions, t.GID)
+}
+
+// rlcreate is a create response.
+//
+// The encode, decode, etc. methods are inherited from Rlopen.
+type rlcreate struct {
+	rlopen
+}
+
+// typ implements message.typ.
+func (*rlcreate) typ() msgType {
+	return msgRlcreate
+}
+
+// String implements fmt.Stringer.
+func (r *rlcreate) String() string {
+	return fmt.Sprintf("Rlcreate{QID: %s, IoUnit: %d}", r.QID, r.IoUnit)
+}
+
+// tsymlink is a symlink request.
+type tsymlink struct {
+	// Directory is the directory fid.
+	Directory fid
+
+	// Name is the new name in the directory.
+	Name string
+
+	// Target is the symlink target.
+	Target string
+
+	// GID is the owning group.
+	GID GID
+}
+
+// decode implements encoder.decode.
+func (t *tsymlink) decode(b *buffer) {
+	t.Directory = b.ReadFID()
+	t.Name = b.ReadString()
+	t.Target = b.ReadString()
+	t.GID = b.ReadGID()
+}
+
+// encode implements encoder.encode.
+func (t *tsymlink) encode(b *buffer) {
+	b.WriteFID(t.Directory)
+	b.WriteString(t.Name)
+	b.WriteString(t.Target)
+	b.WriteGID(t.GID)
+}
+
+// typ implements message.typ.
+func (*tsymlink) typ() msgType {
+	return msgTsymlink
+}
+
+// String implements fmt.Stringer.
+func (t *tsymlink) String() string {
+	return fmt.Sprintf("Tsymlink{DirectoryFID: %d, Name: %s, Target: %s, GID: %d}", t.Directory, t.Name, t.Target, t.GID)
+}
+
+// rsymlink is a symlink response.
+type rsymlink struct {
+	// QID is the new symlink's QID.
+	QID QID
+}
+
+// decode implements encoder.decode.
+func (r *rsymlink) decode(b *buffer) { + r.QID.decode(b) +} + +// encode implements encoder.encode. +func (r *rsymlink) encode(b *buffer) { + r.QID.encode(b) +} + +// typ implements message.typ. +func (*rsymlink) typ() msgType { + return msgRsymlink +} + +// String implements fmt.Stringer. +func (r *rsymlink) String() string { + return fmt.Sprintf("Rsymlink{QID: %s}", r.QID) +} + +// tlink is a link request. +type tlink struct { + // Directory is the directory to contain the link. + Directory fid + + // fid is the target. + Target fid + + // Name is the new source name. + Name string +} + +// decode implements encoder.decode. +func (t *tlink) decode(b *buffer) { + t.Directory = b.ReadFID() + t.Target = b.ReadFID() + t.Name = b.ReadString() +} + +// encode implements encoder.encode. +func (t *tlink) encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteFID(t.Target) + b.WriteString(t.Name) +} + +// typ implements message.typ. +func (*tlink) typ() msgType { + return msgTlink +} + +// String implements fmt.Stringer. +func (t *tlink) String() string { + return fmt.Sprintf("Tlink{DirectoryFID: %d, TargetFID: %d, Name: %s}", t.Directory, t.Target, t.Name) +} + +// rlink is a link response. +type rlink struct { +} + +// typ implements message.typ. +func (*rlink) typ() msgType { + return msgRlink +} + +// decode implements encoder.decode. +func (*rlink) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*rlink) encode(b *buffer) { +} + +// String implements fmt.Stringer. +func (r *rlink) String() string { + return fmt.Sprintf("Rlink{}") +} + +// trenameat is a rename request. +type trenameat struct { + // OldDirectory is the source directory. + OldDirectory fid + + // OldName is the source file name. + OldName string + + // NewDirectory is the target directory. + NewDirectory fid + + // NewName is the new file name. + NewName string +} + +// decode implements encoder.decode. 
+func (t *trenameat) decode(b *buffer) { + t.OldDirectory = b.ReadFID() + t.OldName = b.ReadString() + t.NewDirectory = b.ReadFID() + t.NewName = b.ReadString() +} + +// encode implements encoder.encode. +func (t *trenameat) encode(b *buffer) { + b.WriteFID(t.OldDirectory) + b.WriteString(t.OldName) + b.WriteFID(t.NewDirectory) + b.WriteString(t.NewName) +} + +// typ implements message.typ. +func (*trenameat) typ() msgType { + return msgTrenameat +} + +// String implements fmt.Stringer. +func (t *trenameat) String() string { + return fmt.Sprintf("TrenameAt{OldDirectoryFID: %d, OldName: %s, NewDirectoryFID: %d, NewName: %s}", t.OldDirectory, t.OldName, t.NewDirectory, t.NewName) +} + +// rrenameat is a rename response. +type rrenameat struct { +} + +// decode implements encoder.decode. +func (*rrenameat) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*rrenameat) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rrenameat) typ() msgType { + return msgRrenameat +} + +// String implements fmt.Stringer. +func (r *rrenameat) String() string { + return fmt.Sprintf("Rrenameat{}") +} + +// tunlinkat is an unlink request. +type tunlinkat struct { + // Directory is the originating directory. + Directory fid + + // Name is the name of the entry to unlink. + Name string + + // Flags are extra flags (e.g. O_DIRECTORY). These are not interpreted by p9. + Flags uint32 +} + +// decode implements encoder.decode. +func (t *tunlinkat) decode(b *buffer) { + t.Directory = b.ReadFID() + t.Name = b.ReadString() + t.Flags = b.Read32() +} + +// encode implements encoder.encode. +func (t *tunlinkat) encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteString(t.Name) + b.Write32(t.Flags) +} + +// typ implements message.typ. +func (*tunlinkat) typ() msgType { + return msgTunlinkat +} + +// String implements fmt.Stringer. 
+func (t *tunlinkat) String() string { + return fmt.Sprintf("Tunlinkat{DirectoryFID: %d, Name: %s, Flags: 0x%X}", t.Directory, t.Name, t.Flags) +} + +// runlinkat is an unlink response. +type runlinkat struct { +} + +// decode implements encoder.decode. +func (*runlinkat) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*runlinkat) encode(b *buffer) { +} + +// typ implements message.typ. +func (*runlinkat) typ() msgType { + return msgRunlinkat +} + +// String implements fmt.Stringer. +func (r *runlinkat) String() string { + return fmt.Sprintf("Runlinkat{}") +} + +// trename is a rename request. +type trename struct { + // fid is the fid to rename. + fid fid + + // Directory is the target directory. + Directory fid + + // Name is the new file name. + Name string +} + +// decode implements encoder.decode. +func (t *trename) decode(b *buffer) { + t.fid = b.ReadFID() + t.Directory = b.ReadFID() + t.Name = b.ReadString() +} + +// encode implements encoder.encode. +func (t *trename) encode(b *buffer) { + b.WriteFID(t.fid) + b.WriteFID(t.Directory) + b.WriteString(t.Name) +} + +// typ implements message.typ. +func (*trename) typ() msgType { + return msgTrename +} + +// String implements fmt.Stringer. +func (t *trename) String() string { + return fmt.Sprintf("Trename{FID: %d, DirectoryFID: %d, Name: %s}", t.fid, t.Directory, t.Name) +} + +// rrename is a rename response. +type rrename struct { +} + +// decode implements encoder.decode. +func (*rrename) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*rrename) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rrename) typ() msgType { + return msgRrename +} + +// String implements fmt.Stringer. +func (r *rrename) String() string { + return fmt.Sprintf("Rrename{}") +} + +// treadlink is a readlink request. +type treadlink struct { + // fid is the symlink. + fid fid +} + +// decode implements encoder.decode. 
+func (t *treadlink) decode(b *buffer) { + t.fid = b.ReadFID() +} + +// encode implements encoder.encode. +func (t *treadlink) encode(b *buffer) { + b.WriteFID(t.fid) +} + +// typ implements message.typ. +func (*treadlink) typ() msgType { + return msgTreadlink +} + +// String implements fmt.Stringer. +func (t *treadlink) String() string { + return fmt.Sprintf("Treadlink{FID: %d}", t.fid) +} + +// rreadlink is a readlink response. +type rreadlink struct { + // Target is the symlink target. + Target string +} + +// decode implements encoder.decode. +func (r *rreadlink) decode(b *buffer) { + r.Target = b.ReadString() +} + +// encode implements encoder.encode. +func (r *rreadlink) encode(b *buffer) { + b.WriteString(r.Target) +} + +// typ implements message.typ. +func (*rreadlink) typ() msgType { + return msgRreadlink +} + +// String implements fmt.Stringer. +func (r *rreadlink) String() string { + return fmt.Sprintf("Rreadlink{Target: %s}", r.Target) +} + +// tread is a read request. +type tread struct { + // fid is the fid to read. + fid fid + + // Offset indicates the file offset. + Offset uint64 + + // Count indicates the number of bytes to read. + Count uint32 +} + +// decode implements encoder.decode. +func (t *tread) decode(b *buffer) { + t.fid = b.ReadFID() + t.Offset = b.Read64() + t.Count = b.Read32() +} + +// encode implements encoder.encode. +func (t *tread) encode(b *buffer) { + b.WriteFID(t.fid) + b.Write64(t.Offset) + b.Write32(t.Count) +} + +// typ implements message.typ. +func (*tread) typ() msgType { + return msgTread +} + +// String implements fmt.Stringer. +func (t *tread) String() string { + return fmt.Sprintf("Tread{FID: %d, Offset: %d, Count: %d}", t.fid, t.Offset, t.Count) +} + +// rreadServerPayloader is the response for a Tread by p9 servers. +// +// rreadServerPayloader exists so the fuzzer can fuzz rread -- however, +// PayloadCleanup causes it to panic, and putting connState in the fuzzer seems +// excessive. 
+type rreadServerPayloader struct { + rread + + fullBuffer []byte + cs *connState +} + +// rread is the response for a Tread. +type rread struct { + // Data is the resulting data. + Data []byte +} + +// decode implements encoder.decode. +// +// Data is automatically decoded via Payload. +func (r *rread) decode(b *buffer) { + count := b.Read32() + if count != uint32(len(r.Data)) { + b.markOverrun() + } +} + +// encode implements encoder.encode. +// +// Data is automatically encoded via Payload. +func (r *rread) encode(b *buffer) { + b.Write32(uint32(len(r.Data))) +} + +// typ implements message.typ. +func (*rread) typ() msgType { + return msgRread +} + +// FixedSize implements payloader.FixedSize. +func (*rread) FixedSize() uint32 { + return 4 +} + +// Payload implements payloader.Payload. +func (r *rread) Payload() []byte { + return r.Data +} + +// SetPayload implements payloader.SetPayload. +func (r *rread) SetPayload(p []byte) { + r.Data = p +} + +func (*rread) PayloadCleanup() {} + +// FixedSize implements payloader.FixedSize. +func (*rreadServerPayloader) FixedSize() uint32 { + return 4 +} + +// Payload implements payloader.Payload. +func (r *rreadServerPayloader) Payload() []byte { + return r.Data +} + +// SetPayload implements payloader.SetPayload. +func (r *rreadServerPayloader) SetPayload(p []byte) { + r.Data = p +} + +// PayloadCleanup implements payloader.PayloadCleanup. +func (r *rreadServerPayloader) PayloadCleanup() { + // Fill it with zeros to not risk leaking previous files' data. + copy(r.Data, r.cs.pristineZeros) + r.cs.readBufPool.Put(&r.fullBuffer) +} + +// String implements fmt.Stringer. +func (r *rread) String() string { + return fmt.Sprintf("Rread{len(Data): %d}", len(r.Data)) +} + +// twrite is a write request. +type twrite struct { + // fid is the fid to read. + fid fid + + // Offset indicates the file offset. + Offset uint64 + + // Data is the data to be written. + Data []byte +} + +// decode implements encoder.decode. 
+func (t *twrite) decode(b *buffer) { + t.fid = b.ReadFID() + t.Offset = b.Read64() + count := b.Read32() + if count != uint32(len(t.Data)) { + b.markOverrun() + } +} + +// encode implements encoder.encode. +// +// This uses the buffer payload to avoid a copy. +func (t *twrite) encode(b *buffer) { + b.WriteFID(t.fid) + b.Write64(t.Offset) + b.Write32(uint32(len(t.Data))) +} + +// typ implements message.typ. +func (*twrite) typ() msgType { + return msgTwrite +} + +// FixedSize implements payloader.FixedSize. +func (*twrite) FixedSize() uint32 { + return 16 +} + +// Payload implements payloader.Payload. +func (t *twrite) Payload() []byte { + return t.Data +} + +func (t *twrite) PayloadCleanup() {} + +// SetPayload implements payloader.SetPayload. +func (t *twrite) SetPayload(p []byte) { + t.Data = p +} + +// String implements fmt.Stringer. +func (t *twrite) String() string { + return fmt.Sprintf("Twrite{FID: %v, Offset %d, len(Data): %d}", t.fid, t.Offset, len(t.Data)) +} + +// rwrite is the response for a Twrite. +type rwrite struct { + // Count indicates the number of bytes successfully written. + Count uint32 +} + +// decode implements encoder.decode. +func (r *rwrite) decode(b *buffer) { + r.Count = b.Read32() +} + +// encode implements encoder.encode. +func (r *rwrite) encode(b *buffer) { + b.Write32(r.Count) +} + +// typ implements message.typ. +func (*rwrite) typ() msgType { + return msgRwrite +} + +// String implements fmt.Stringer. +func (r *rwrite) String() string { + return fmt.Sprintf("Rwrite{Count: %d}", r.Count) +} + +// tmknod is a mknod request. +type tmknod struct { + // Directory is the parent directory. + Directory fid + + // Name is the device name. + Name string + + // Mode is the device mode and permissions. + Mode FileMode + + // Major is the device major number. + Major uint32 + + // Minor is the device minor number. + Minor uint32 + + // GID is the device GID. + GID GID +} + +// decode implements encoder.decode. 
+func (t *tmknod) decode(b *buffer) { + t.Directory = b.ReadFID() + t.Name = b.ReadString() + t.Mode = b.ReadFileMode() + t.Major = b.Read32() + t.Minor = b.Read32() + t.GID = b.ReadGID() +} + +// encode implements encoder.encode. +func (t *tmknod) encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteString(t.Name) + b.WriteFileMode(t.Mode) + b.Write32(t.Major) + b.Write32(t.Minor) + b.WriteGID(t.GID) +} + +// typ implements message.typ. +func (*tmknod) typ() msgType { + return msgTmknod +} + +// String implements fmt.Stringer. +func (t *tmknod) String() string { + return fmt.Sprintf("Tmknod{DirectoryFID: %d, Name: %s, Mode: 0o%o, Major: %d, Minor: %d, GID: %d}", t.Directory, t.Name, t.Mode, t.Major, t.Minor, t.GID) +} + +// rmknod is a mknod response. +type rmknod struct { + // QID is the resulting QID. + QID QID +} + +// decode implements encoder.decode. +func (r *rmknod) decode(b *buffer) { + r.QID.decode(b) +} + +// encode implements encoder.encode. +func (r *rmknod) encode(b *buffer) { + r.QID.encode(b) +} + +// typ implements message.typ. +func (*rmknod) typ() msgType { + return msgRmknod +} + +// String implements fmt.Stringer. +func (r *rmknod) String() string { + return fmt.Sprintf("Rmknod{QID: %s}", r.QID) +} + +// tmkdir is a mkdir request. +type tmkdir struct { + // Directory is the parent directory. + Directory fid + + // Name is the new directory name. + Name string + + // Permissions is the set of permission bits. + Permissions FileMode + + // GID is the owning group. + GID GID +} + +// decode implements encoder.decode. +func (t *tmkdir) decode(b *buffer) { + t.Directory = b.ReadFID() + t.Name = b.ReadString() + t.Permissions = b.ReadPermissions() + t.GID = b.ReadGID() +} + +// encode implements encoder.encode. +func (t *tmkdir) encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteString(t.Name) + b.WritePermissions(t.Permissions) + b.WriteGID(t.GID) +} + +// typ implements message.typ. 
+func (*tmkdir) typ() msgType { + return msgTmkdir +} + +// String implements fmt.Stringer. +func (t *tmkdir) String() string { + return fmt.Sprintf("Tmkdir{DirectoryFID: %d, Name: %s, Permissions: 0o%o, GID: %d}", t.Directory, t.Name, t.Permissions, t.GID) +} + +// rmkdir is a mkdir response. +type rmkdir struct { + // QID is the resulting QID. + QID QID +} + +// decode implements encoder.decode. +func (r *rmkdir) decode(b *buffer) { + r.QID.decode(b) +} + +// encode implements encoder.encode. +func (r *rmkdir) encode(b *buffer) { + r.QID.encode(b) +} + +// typ implements message.typ. +func (*rmkdir) typ() msgType { + return msgRmkdir +} + +// String implements fmt.Stringer. +func (r *rmkdir) String() string { + return fmt.Sprintf("Rmkdir{QID: %s}", r.QID) +} + +// tgetattr is a getattr request. +type tgetattr struct { + // fid is the fid to get attributes for. + fid fid + + // AttrMask is the set of attributes to get. + AttrMask AttrMask +} + +// decode implements encoder.decode. +func (t *tgetattr) decode(b *buffer) { + t.fid = b.ReadFID() + t.AttrMask.decode(b) +} + +// encode implements encoder.encode. +func (t *tgetattr) encode(b *buffer) { + b.WriteFID(t.fid) + t.AttrMask.encode(b) +} + +// typ implements message.typ. +func (*tgetattr) typ() msgType { + return msgTgetattr +} + +// String implements fmt.Stringer. +func (t *tgetattr) String() string { + return fmt.Sprintf("Tgetattr{FID: %d, AttrMask: %s}", t.fid, t.AttrMask) +} + +// rgetattr is a getattr response. +type rgetattr struct { + // Valid indicates which fields are valid. + Valid AttrMask + + // QID is the QID for this file. + QID + + // Attr is the set of attributes. + Attr Attr +} + +// decode implements encoder.decode. +func (r *rgetattr) decode(b *buffer) { + r.Valid.decode(b) + r.QID.decode(b) + r.Attr.decode(b) +} + +// encode implements encoder.encode. +func (r *rgetattr) encode(b *buffer) { + r.Valid.encode(b) + r.QID.encode(b) + r.Attr.encode(b) +} + +// typ implements message.typ. 
+func (*rgetattr) typ() msgType { + return msgRgetattr +} + +// String implements fmt.Stringer. +func (r *rgetattr) String() string { + return fmt.Sprintf("Rgetattr{Valid: %v, QID: %s, Attr: %s}", r.Valid, r.QID, r.Attr) +} + +// tsetattr is a setattr request. +type tsetattr struct { + // fid is the fid to change. + fid fid + + // Valid is the set of bits which will be used. + Valid SetAttrMask + + // SetAttr is the set request. + SetAttr SetAttr +} + +// decode implements encoder.decode. +func (t *tsetattr) decode(b *buffer) { + t.fid = b.ReadFID() + t.Valid.decode(b) + t.SetAttr.decode(b) +} + +// encode implements encoder.encode. +func (t *tsetattr) encode(b *buffer) { + b.WriteFID(t.fid) + t.Valid.encode(b) + t.SetAttr.encode(b) +} + +// typ implements message.typ. +func (*tsetattr) typ() msgType { + return msgTsetattr +} + +// String implements fmt.Stringer. +func (t *tsetattr) String() string { + return fmt.Sprintf("Tsetattr{FID: %d, Valid: %v, SetAttr: %s}", t.fid, t.Valid, t.SetAttr) +} + +// rsetattr is a setattr response. +type rsetattr struct { +} + +// decode implements encoder.decode. +func (*rsetattr) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (*rsetattr) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rsetattr) typ() msgType { + return msgRsetattr +} + +// String implements fmt.Stringer. +func (r *rsetattr) String() string { + return fmt.Sprintf("Rsetattr{}") +} + +// txattrwalk walks extended attributes. +type txattrwalk struct { + // fid is the fid to check for attributes. + fid fid + + // newFID is the new fid associated with the attributes. + newFID fid + + // Name is the attribute name. + Name string +} + +// decode implements encoder.decode. +func (t *txattrwalk) decode(b *buffer) { + t.fid = b.ReadFID() + t.newFID = b.ReadFID() + t.Name = b.ReadString() +} + +// encode implements encoder.encode. 
+func (t *txattrwalk) encode(b *buffer) { + b.WriteFID(t.fid) + b.WriteFID(t.newFID) + b.WriteString(t.Name) +} + +// typ implements message.typ. +func (*txattrwalk) typ() msgType { + return msgTxattrwalk +} + +// String implements fmt.Stringer. +func (t *txattrwalk) String() string { + return fmt.Sprintf("Txattrwalk{FID: %d, newFID: %d, Name: %s}", t.fid, t.newFID, t.Name) +} + +// rxattrwalk is a xattrwalk response. +type rxattrwalk struct { + // Size is the size of the extended attribute. + Size uint64 +} + +// decode implements encoder.decode. +func (r *rxattrwalk) decode(b *buffer) { + r.Size = b.Read64() +} + +// encode implements encoder.encode. +func (r *rxattrwalk) encode(b *buffer) { + b.Write64(r.Size) +} + +// typ implements message.typ. +func (*rxattrwalk) typ() msgType { + return msgRxattrwalk +} + +// String implements fmt.Stringer. +func (r *rxattrwalk) String() string { + return fmt.Sprintf("Rxattrwalk{Size: %d}", r.Size) +} + +// txattrcreate prepare to set extended attributes. +type txattrcreate struct { + // fid is input/output parameter, it identifies the file on which + // extended attributes will be set but after successful Rxattrcreate + // it is used to write the extended attribute value. + fid fid + + // Name is the attribute name. + Name string + + // Size of the attribute value. When the fid is clunked it has to match + // the number of bytes written to the fid. + AttrSize uint64 + + // Linux setxattr(2) flags. + Flags uint32 +} + +// decode implements encoder.decode. +func (t *txattrcreate) decode(b *buffer) { + t.fid = b.ReadFID() + t.Name = b.ReadString() + t.AttrSize = b.Read64() + t.Flags = b.Read32() +} + +// encode implements encoder.encode. +func (t *txattrcreate) encode(b *buffer) { + b.WriteFID(t.fid) + b.WriteString(t.Name) + b.Write64(t.AttrSize) + b.Write32(t.Flags) +} + +// typ implements message.typ. +func (*txattrcreate) typ() msgType { + return msgTxattrcreate +} + +// String implements fmt.Stringer. 
+func (t *txattrcreate) String() string { + return fmt.Sprintf("Txattrcreate{FID: %d, Name: %s, AttrSize: %d, Flags: %d}", t.fid, t.Name, t.AttrSize, t.Flags) +} + +// rxattrcreate is a xattrcreate response. +type rxattrcreate struct { +} + +// decode implements encoder.decode. +func (r *rxattrcreate) decode(b *buffer) { +} + +// encode implements encoder.encode. +func (r *rxattrcreate) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rxattrcreate) typ() msgType { + return msgRxattrcreate +} + +// String implements fmt.Stringer. +func (r *rxattrcreate) String() string { + return fmt.Sprintf("Rxattrcreate{}") +} + +// treaddir is a readdir request. +type treaddir struct { + // Directory is the directory fid to read. + Directory fid + + // Offset is the offset to read at. + Offset uint64 + + // Count is the number of bytes to read. + Count uint32 +} + +// decode implements encoder.decode. +func (t *treaddir) decode(b *buffer) { + t.Directory = b.ReadFID() + t.Offset = b.Read64() + t.Count = b.Read32() +} + +// encode implements encoder.encode. +func (t *treaddir) encode(b *buffer) { + b.WriteFID(t.Directory) + b.Write64(t.Offset) + b.Write32(t.Count) +} + +// typ implements message.typ. +func (*treaddir) typ() msgType { + return msgTreaddir +} + +// String implements fmt.Stringer. +func (t *treaddir) String() string { + return fmt.Sprintf("Treaddir{DirectoryFID: %d, Offset: %d, Count: %d}", t.Directory, t.Offset, t.Count) +} + +// rreaddir is a readdir response. +type rreaddir struct { + // Count is the byte limit. + // + // This should always be set from the Treaddir request. + Count uint32 + + // Entries are the resulting entries. + // + // This may be constructed in decode. + Entries []Dirent + + // payload is the encoded payload. + // + // This is constructed by encode. + payload []byte +} + +// decode implements encoder.decode. 
+func (r *rreaddir) decode(b *buffer) { + r.Count = b.Read32() + entriesBuf := buffer{data: r.payload} + r.Entries = r.Entries[:0] + for { + var d Dirent + d.decode(&entriesBuf) + if entriesBuf.isOverrun() { + // Couldn't decode a complete entry. + break + } + r.Entries = append(r.Entries, d) + } +} + +// encode implements encoder.encode. +func (r *rreaddir) encode(b *buffer) { + entriesBuf := buffer{} + payloadSize := 0 + for _, d := range r.Entries { + d.encode(&entriesBuf) + if len(entriesBuf.data) > int(r.Count) { + break + } + payloadSize = len(entriesBuf.data) + } + r.Count = uint32(payloadSize) + r.payload = entriesBuf.data[:payloadSize] + b.Write32(r.Count) +} + +// typ implements message.typ. +func (*rreaddir) typ() msgType { + return msgRreaddir +} + +// FixedSize implements payloader.FixedSize. +func (*rreaddir) FixedSize() uint32 { + return 4 +} + +// Payload implements payloader.Payload. +func (r *rreaddir) Payload() []byte { + return r.payload +} + +func (r *rreaddir) PayloadCleanup() {} + +// SetPayload implements payloader.SetPayload. +func (r *rreaddir) SetPayload(p []byte) { + r.payload = p +} + +// String implements fmt.Stringer. +func (r *rreaddir) String() string { + return fmt.Sprintf("Rreaddir{Count: %d, Entries: %s}", r.Count, r.Entries) +} + +// Tfsync is an fsync request. +type tfsync struct { + // fid is the fid to sync. + fid fid +} + +// decode implements encoder.decode. +func (t *tfsync) decode(b *buffer) { + t.fid = b.ReadFID() +} + +// encode implements encoder.encode. +func (t *tfsync) encode(b *buffer) { + b.WriteFID(t.fid) +} + +// typ implements message.typ. +func (*tfsync) typ() msgType { + return msgTfsync +} + +// String implements fmt.Stringer. +func (t *tfsync) String() string { + return fmt.Sprintf("Tfsync{FID: %d}", t.fid) +} + +// rfsync is an fsync response. +type rfsync struct { +} + +// decode implements encoder.decode. +func (*rfsync) decode(b *buffer) { +} + +// encode implements encoder.encode. 
+func (*rfsync) encode(b *buffer) { +} + +// typ implements message.typ. +func (*rfsync) typ() msgType { + return msgRfsync +} + +// String implements fmt.Stringer. +func (r *rfsync) String() string { + return fmt.Sprintf("Rfsync{}") +} + +// tstatfs is a stat request. +type tstatfs struct { + // fid is the root. + fid fid +} + +// decode implements encoder.decode. +func (t *tstatfs) decode(b *buffer) { + t.fid = b.ReadFID() +} + +// encode implements encoder.encode. +func (t *tstatfs) encode(b *buffer) { + b.WriteFID(t.fid) +} + +// typ implements message.typ. +func (*tstatfs) typ() msgType { + return msgTstatfs +} + +// String implements fmt.Stringer. +func (t *tstatfs) String() string { + return fmt.Sprintf("Tstatfs{FID: %d}", t.fid) +} + +// rstatfs is the response for a Tstatfs. +type rstatfs struct { + // FSStat is the stat result. + FSStat FSStat +} + +// decode implements encoder.decode. +func (r *rstatfs) decode(b *buffer) { + r.FSStat.decode(b) +} + +// encode implements encoder.encode. +func (r *rstatfs) encode(b *buffer) { + r.FSStat.encode(b) +} + +// typ implements message.typ. +func (*rstatfs) typ() msgType { + return msgRstatfs +} + +// String implements fmt.Stringer. +func (r *rstatfs) String() string { + return fmt.Sprintf("Rstatfs{FSStat: %v}", r.FSStat) +} + +// twalkgetattr is a walk request. +type twalkgetattr struct { + // fid is the fid to be walked. + fid fid + + // newFID is the resulting fid. + newFID fid + + // Names are the set of names to be walked. + Names []string +} + +// decode implements encoder.decode. +func (t *twalkgetattr) decode(b *buffer) { + t.fid = b.ReadFID() + t.newFID = b.ReadFID() + n := b.Read16() + t.Names = t.Names[:0] + for i := 0; i < int(n); i++ { + t.Names = append(t.Names, b.ReadString()) + } +} + +// encode implements encoder.encode. 
+func (t *twalkgetattr) encode(b *buffer) { + b.WriteFID(t.fid) + b.WriteFID(t.newFID) + b.Write16(uint16(len(t.Names))) + for _, name := range t.Names { + b.WriteString(name) + } +} + +// typ implements message.typ. +func (*twalkgetattr) typ() msgType { + return msgTwalkgetattr +} + +// String implements fmt.Stringer. +func (t *twalkgetattr) String() string { + return fmt.Sprintf("Twalkgetattr{FID: %d, newFID: %d, Names: %v}", t.fid, t.newFID, t.Names) +} + +// rwalkgetattr is a walk response. +type rwalkgetattr struct { + // Valid indicates which fields are valid in the Attr below. + Valid AttrMask + + // Attr is the set of attributes for the last QID (the file walked to). + Attr Attr + + // QIDs are the set of QIDs returned. + QIDs []QID +} + +// decode implements encoder.decode. +func (r *rwalkgetattr) decode(b *buffer) { + r.Valid.decode(b) + r.Attr.decode(b) + n := b.Read16() + r.QIDs = r.QIDs[:0] + for i := 0; i < int(n); i++ { + var q QID + q.decode(b) + r.QIDs = append(r.QIDs, q) + } +} + +// encode implements encoder.encode. +func (r *rwalkgetattr) encode(b *buffer) { + r.Valid.encode(b) + r.Attr.encode(b) + b.Write16(uint16(len(r.QIDs))) + for _, q := range r.QIDs { + q.encode(b) + } +} + +// typ implements message.typ. +func (*rwalkgetattr) typ() msgType { + return msgRwalkgetattr +} + +// String implements fmt.Stringer. +func (r *rwalkgetattr) String() string { + return fmt.Sprintf("Rwalkgetattr{Valid: %s, Attr: %s, QIDs: %v}", r.Valid, r.Attr, r.QIDs) +} + +// tucreate is a tlcreate message that includes a UID. +type tucreate struct { + tlcreate + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// decode implements encoder.decode. +func (t *tucreate) decode(b *buffer) { + t.tlcreate.decode(b) + t.UID = b.ReadUID() +} + +// encode implements encoder.encode. +func (t *tucreate) encode(b *buffer) { + t.tlcreate.encode(b) + b.WriteUID(t.UID) +} + +// typ implements message.typ. 
+func (t *tucreate) typ() msgType { + return msgTucreate +} + +// String implements fmt.Stringer. +func (t *tucreate) String() string { + return fmt.Sprintf("Tucreate{Tlcreate: %v, UID: %d}", &t.tlcreate, t.UID) +} + +// rucreate is a file creation response. +type rucreate struct { + rlcreate +} + +// typ implements message.typ. +func (*rucreate) typ() msgType { + return msgRucreate +} + +// String implements fmt.Stringer. +func (r *rucreate) String() string { + return fmt.Sprintf("Rucreate{%v}", &r.rlcreate) +} + +// tumkdir is a Tmkdir message that includes a UID. +type tumkdir struct { + tmkdir + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// decode implements encoder.decode. +func (t *tumkdir) decode(b *buffer) { + t.tmkdir.decode(b) + t.UID = b.ReadUID() +} + +// encode implements encoder.encode. +func (t *tumkdir) encode(b *buffer) { + t.tmkdir.encode(b) + b.WriteUID(t.UID) +} + +// typ implements message.typ. +func (t *tumkdir) typ() msgType { + return msgTumkdir +} + +// String implements fmt.Stringer. +func (t *tumkdir) String() string { + return fmt.Sprintf("Tumkdir{Tmkdir: %v, UID: %d}", &t.tmkdir, t.UID) +} + +// rumkdir is a umkdir response. +type rumkdir struct { + rmkdir +} + +// typ implements message.typ. +func (*rumkdir) typ() msgType { + return msgRumkdir +} + +// String implements fmt.Stringer. +func (r *rumkdir) String() string { + return fmt.Sprintf("Rumkdir{%v}", &r.rmkdir) +} + +// tumknod is a Tmknod message that includes a UID. +type tumknod struct { + tmknod + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// decode implements encoder.decode. +func (t *tumknod) decode(b *buffer) { + t.tmknod.decode(b) + t.UID = b.ReadUID() +} + +// encode implements encoder.encode. +func (t *tumknod) encode(b *buffer) { + t.tmknod.encode(b) + b.WriteUID(t.UID) +} + +// typ implements message.typ. 
+func (t *tumknod) typ() msgType { + return msgTumknod +} + +// String implements fmt.Stringer. +func (t *tumknod) String() string { + return fmt.Sprintf("Tumknod{Tmknod: %v, UID: %d}", &t.tmknod, t.UID) +} + +// rumknod is a umknod response. +type rumknod struct { + rmknod +} + +// typ implements message.typ. +func (*rumknod) typ() msgType { + return msgRumknod +} + +// String implements fmt.Stringer. +func (r *rumknod) String() string { + return fmt.Sprintf("Rumknod{%v}", &r.rmknod) +} + +// tusymlink is a Tsymlink message that includes a UID. +type tusymlink struct { + tsymlink + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// decode implements encoder.decode. +func (t *tusymlink) decode(b *buffer) { + t.tsymlink.decode(b) + t.UID = b.ReadUID() +} + +// encode implements encoder.encode. +func (t *tusymlink) encode(b *buffer) { + t.tsymlink.encode(b) + b.WriteUID(t.UID) +} + +// typ implements message.typ. +func (t *tusymlink) typ() msgType { + return msgTusymlink +} + +// String implements fmt.Stringer. +func (t *tusymlink) String() string { + return fmt.Sprintf("Tusymlink{Tsymlink: %v, UID: %d}", &t.tsymlink, t.UID) +} + +// rusymlink is a usymlink response. +type rusymlink struct { + rsymlink +} + +// typ implements message.typ. +func (*rusymlink) typ() msgType { + return msgRusymlink +} + +// String implements fmt.Stringer. +func (r *rusymlink) String() string { + return fmt.Sprintf("Rusymlink{%v}", &r.rsymlink) +} + +// LockType is lock type for Tlock +type LockType uint8 + +// These constants define Lock operations: Read, Write, and Un(lock) +// They map to Linux values of F_RDLCK, F_WRLCK, F_UNLCK. 
+// If that seems a little Linux-centric, recall that the "L" +// in 9P2000.L means "Linux" :-) +const ( + ReadLock LockType = iota + WriteLock + Unlock +) + +func (l LockType) String() string { + switch l { + case ReadLock: + return "ReadLock" + case WriteLock: + return "WriteLock" + case Unlock: + return "Unlock" + } + return "unknown lock type" +} + +// LockFlags are flags for the lock. Currently, and possibly forever, only one +// is really used: LockFlagsBlock +type LockFlags uint32 + +const ( + // LockFlagsBlock indicates a blocking request. + LockFlagsBlock LockFlags = 1 + + // LockFlagsReclaim is "Reserved for future use." + // It's been some time since 9P2000.L came about, + // I suspect "future" in this case is "never"? + LockFlagsReclaim LockFlags = 2 +) + +// LockStatus contains lock status result. +type LockStatus uint8 + +// These are the four current return values for Rlock. +const ( + LockStatusOK LockStatus = iota + LockStatusBlocked + LockStatusError + LockStatusGrace +) + +func (s LockStatus) String() string { + switch s { + case LockStatusOK: + return "LockStatusOK" + case LockStatusBlocked: + return "LockStatusBlocked" + case LockStatusError: + return "LockStatusError" + case LockStatusGrace: + return "LockStatusGrace" + } + return "unknown lock status" +} + +// tlock is a Tlock message +type tlock struct { + // fid is the fid to lock. + fid fid + + Type LockType // Type of lock: F_RDLCK, F_WRLCK, F_UNLCK */ + Flags LockFlags // flags, not whence, docs are wrong. + Start uint64 // Starting offset for lock + Length uint64 // Number of bytes to lock + PID int32 // PID of process blocking our lock (F_GETLK only) + + // "client_id is an additional mechanism for uniquely + // identifying the lock requester and is set to the nodename + // by the Linux v9fs client." + // https://github.com/chaos/diod/blob/master/protocol.md#lock---acquire-or-release-a-posix-record-lock + Client string // Client id -- but technically can be anything. 
+} + +// decode implements encoder.decode. +func (t *tlock) decode(b *buffer) { + t.fid = b.ReadFID() + t.Type = LockType(b.Read8()) + t.Flags = LockFlags(b.Read32()) + t.Start = b.Read64() + t.Length = b.Read64() + t.PID = int32(b.Read32()) + t.Client = b.ReadString() +} + +// encode implements encoder.encode. +func (t *tlock) encode(b *buffer) { + b.WriteFID(t.fid) + b.Write8(uint8(t.Type)) + b.Write32(uint32(t.Flags)) + b.Write64(t.Start) + b.Write64(t.Length) + b.Write32(uint32(t.PID)) + b.WriteString(t.Client) +} + +// typ implements message.typ. +func (*tlock) typ() msgType { + return msgTlock +} + +// String implements fmt.Stringer. +func (t *tlock) String() string { + return fmt.Sprintf("Tlock{Type: %s, Flags: %#x, Start: %d, Length: %d, PID: %d, Client: %s}", t.Type.String(), t.Flags, t.Start, t.Length, t.PID, t.Client) +} + +// rlock is a lock response. +type rlock struct { + Status LockStatus +} + +// decode implements encoder.decode. +func (r *rlock) decode(b *buffer) { + r.Status = LockStatus(b.Read8()) +} + +// encode implements encoder.encode. +func (r *rlock) encode(b *buffer) { + b.Write8(uint8(r.Status)) +} + +// typ implements message.typ. +func (*rlock) typ() msgType { + return msgRlock +} + +// String implements fmt.Stringer. +func (r *rlock) String() string { + return fmt.Sprintf("Rlock{Status: %s}", r.Status) +} + +// Let's wait until we need this? POSIX locks over a network make 0 sense. +// getlock - test for the existence of a POSIX record lock +// size[4] Tgetlock tag[2] fid[4] type[1] start[8] length[8] proc_id[4] client_id[s] +// size[4] Rgetlock tag[2] type[1] start[8] length[8] proc_id[4] client_id[s] +// getlock tests for the existence of a POSIX record lock and has semantics similar to Linux fcntl(F_GETLK). 
+ +// As with lock, type has one of the values defined above, and start, +// length, and proc_id correspond to the analogous fields in struct +// flock passed to Linux fcntl(F_GETLK), and client_Id is an +// additional mechanism for uniquely identifying the lock requester +// and is set to the nodename by the Linux v9fs client. tusymlink is +// a Tsymlink message that includes a UID. + +/// END LOCK + +const maxCacheSize = 3 + +// msgFactory is used to reduce allocations by caching messages for reuse. +type msgFactory struct { + create func() message + cache chan message +} + +// msgDotLRegistry indexes all 9P2000.L(.Google.N) message factories by type. +var msgDotLRegistry registry + +type registry struct { + factories [math.MaxUint8 + 1]msgFactory + + // largestFixedSize is computed so that given some message size M, you can + // compute the maximum payload size (e.g. for Twrite, Rread) with + // M-largestFixedSize. You could do this individual on a per-message basis, + // but it's easier to compute a single maximum safe payload. + largestFixedSize uint32 +} + +// get returns a new message by type. +// +// An error is returned in the case of an unknown message. +// +// This takes, and ignores, a message tag so that it may be used directly as a +// lookuptagAndType function for recv (by design). +func (r *registry) get(_ tag, t msgType) (message, error) { + entry := &r.factories[t] + if entry.create == nil { + return nil, &ErrInvalidMsgType{t} + } + + select { + case msg := <-entry.cache: + return msg, nil + default: + return entry.create(), nil + } +} + +func (r *registry) put(msg message) { + if p, ok := msg.(payloader); ok { + p.SetPayload(nil) + } + + entry := &r.factories[msg.typ()] + select { + case entry.cache <- msg: + default: + } +} + +// register registers the given message type. +// +// This may cause panic on failure and should only be used from init. 
+func (r *registry) register(t msgType, fn func() message) { + if int(t) >= len(r.factories) { + panic(fmt.Sprintf("message type %d is too large. It must be smaller than %d", t, len(r.factories))) + } + if r.factories[t].create != nil { + panic(fmt.Sprintf("duplicate message type %d: first is %T, second is %T", t, r.factories[t].create(), fn())) + } + r.factories[t] = msgFactory{ + create: fn, + cache: make(chan message, maxCacheSize), + } + + if size := calculateSize(fn()); size > r.largestFixedSize { + r.largestFixedSize = size + } +} + +func calculateSize(m message) uint32 { + if p, ok := m.(payloader); ok { + return p.FixedSize() + } + var dataBuf buffer + m.encode(&dataBuf) + return uint32(len(dataBuf.data)) +} + +func init() { + msgDotLRegistry.register(msgRlerror, func() message { return &rlerror{} }) + msgDotLRegistry.register(msgTstatfs, func() message { return &tstatfs{} }) + msgDotLRegistry.register(msgRstatfs, func() message { return &rstatfs{} }) + msgDotLRegistry.register(msgTlopen, func() message { return &tlopen{} }) + msgDotLRegistry.register(msgRlopen, func() message { return &rlopen{} }) + msgDotLRegistry.register(msgTlcreate, func() message { return &tlcreate{} }) + msgDotLRegistry.register(msgRlcreate, func() message { return &rlcreate{} }) + msgDotLRegistry.register(msgTsymlink, func() message { return &tsymlink{} }) + msgDotLRegistry.register(msgRsymlink, func() message { return &rsymlink{} }) + msgDotLRegistry.register(msgTmknod, func() message { return &tmknod{} }) + msgDotLRegistry.register(msgRmknod, func() message { return &rmknod{} }) + msgDotLRegistry.register(msgTrename, func() message { return &trename{} }) + msgDotLRegistry.register(msgRrename, func() message { return &rrename{} }) + msgDotLRegistry.register(msgTreadlink, func() message { return &treadlink{} }) + msgDotLRegistry.register(msgRreadlink, func() message { return &rreadlink{} }) + msgDotLRegistry.register(msgTgetattr, func() message { return &tgetattr{} }) + 
msgDotLRegistry.register(msgRgetattr, func() message { return &rgetattr{} }) + msgDotLRegistry.register(msgTsetattr, func() message { return &tsetattr{} }) + msgDotLRegistry.register(msgRsetattr, func() message { return &rsetattr{} }) + msgDotLRegistry.register(msgTxattrwalk, func() message { return &txattrwalk{} }) + msgDotLRegistry.register(msgRxattrwalk, func() message { return &rxattrwalk{} }) + msgDotLRegistry.register(msgTxattrcreate, func() message { return &txattrcreate{} }) + msgDotLRegistry.register(msgRxattrcreate, func() message { return &rxattrcreate{} }) + msgDotLRegistry.register(msgTreaddir, func() message { return &treaddir{} }) + msgDotLRegistry.register(msgRreaddir, func() message { return &rreaddir{} }) + msgDotLRegistry.register(msgTfsync, func() message { return &tfsync{} }) + msgDotLRegistry.register(msgRfsync, func() message { return &rfsync{} }) + msgDotLRegistry.register(msgTlink, func() message { return &tlink{} }) + msgDotLRegistry.register(msgRlink, func() message { return &rlink{} }) + msgDotLRegistry.register(msgTlock, func() message { return &tlock{} }) + msgDotLRegistry.register(msgRlock, func() message { return &rlock{} }) + msgDotLRegistry.register(msgTmkdir, func() message { return &tmkdir{} }) + msgDotLRegistry.register(msgRmkdir, func() message { return &rmkdir{} }) + msgDotLRegistry.register(msgTrenameat, func() message { return &trenameat{} }) + msgDotLRegistry.register(msgRrenameat, func() message { return &rrenameat{} }) + msgDotLRegistry.register(msgTunlinkat, func() message { return &tunlinkat{} }) + msgDotLRegistry.register(msgRunlinkat, func() message { return &runlinkat{} }) + msgDotLRegistry.register(msgTversion, func() message { return &tversion{} }) + msgDotLRegistry.register(msgRversion, func() message { return &rversion{} }) + msgDotLRegistry.register(msgTauth, func() message { return &tauth{} }) + msgDotLRegistry.register(msgRauth, func() message { return &rauth{} }) + msgDotLRegistry.register(msgTattach, func() 
message { return &tattach{} }) + msgDotLRegistry.register(msgRattach, func() message { return &rattach{} }) + msgDotLRegistry.register(msgTflush, func() message { return &tflush{} }) + msgDotLRegistry.register(msgRflush, func() message { return &rflush{} }) + msgDotLRegistry.register(msgTwalk, func() message { return &twalk{} }) + msgDotLRegistry.register(msgRwalk, func() message { return &rwalk{} }) + msgDotLRegistry.register(msgTread, func() message { return &tread{} }) + msgDotLRegistry.register(msgRread, func() message { return &rread{} }) + msgDotLRegistry.register(msgTwrite, func() message { return &twrite{} }) + msgDotLRegistry.register(msgRwrite, func() message { return &rwrite{} }) + msgDotLRegistry.register(msgTclunk, func() message { return &tclunk{} }) + msgDotLRegistry.register(msgRclunk, func() message { return &rclunk{} }) + msgDotLRegistry.register(msgTremove, func() message { return &tremove{} }) + msgDotLRegistry.register(msgRremove, func() message { return &rremove{} }) + msgDotLRegistry.register(msgTwalkgetattr, func() message { return &twalkgetattr{} }) + msgDotLRegistry.register(msgRwalkgetattr, func() message { return &rwalkgetattr{} }) + msgDotLRegistry.register(msgTucreate, func() message { return &tucreate{} }) + msgDotLRegistry.register(msgRucreate, func() message { return &rucreate{} }) + msgDotLRegistry.register(msgTumkdir, func() message { return &tumkdir{} }) + msgDotLRegistry.register(msgRumkdir, func() message { return &rumkdir{} }) + msgDotLRegistry.register(msgTumknod, func() message { return &tumknod{} }) + msgDotLRegistry.register(msgRumknod, func() message { return &rumknod{} }) + msgDotLRegistry.register(msgTusymlink, func() message { return &tusymlink{} }) + msgDotLRegistry.register(msgRusymlink, func() message { return &rusymlink{} }) +} diff --git a/vendor/github.com/hugelgupf/p9/p9/p9.go b/vendor/github.com/hugelgupf/p9/p9/p9.go new file mode 100644 index 000000000..e3c3f3271 --- /dev/null +++ 
b/vendor/github.com/hugelgupf/p9/p9/p9.go @@ -0,0 +1,1167 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package p9 is a 9P2000.L implementation. +// +// Servers implement Attacher and File interfaces. +// +// Clients can use Client. +package p9 + +import ( + "fmt" + "math" + "os" + "strings" + "sync/atomic" + + "github.com/hugelgupf/p9/internal" +) + +// Debug can be assigned to log.Printf to print messages received and sent. +var Debug = func(fmt string, v ...interface{}) {} + +const ( + // DefaultMessageSize is a sensible default. + DefaultMessageSize uint32 = 64 << 10 +) + +// OpenFlags is the mode passed to Open and Create operations. +// +// These correspond to bits sent over the wire. +type OpenFlags uint32 + +const ( + // ReadOnly is a Topen and Tcreate flag indicating read-only mode. + ReadOnly OpenFlags = 0 + + // WriteOnly is a Topen and Tcreate flag indicating write-only mode. + WriteOnly OpenFlags = 1 + + // ReadWrite is a Topen flag indicates read-write mode. + ReadWrite OpenFlags = 2 + + // OpenFlagsModeMask is a mask of valid OpenFlags mode bits. + OpenFlagsModeMask OpenFlags = 3 +) + +// Mode returns only the open mode (read-only, read-write, or write-only). +func (o OpenFlags) Mode() OpenFlags { + return o & OpenFlagsModeMask +} + +// OSFlags converts a p9.OpenFlags to an int compatible with open(2). 
+func (o OpenFlags) OSFlags() int { + return int(o & OpenFlagsModeMask) +} + +// String implements fmt.Stringer. +func (o OpenFlags) String() string { + switch o { + case ReadOnly: + return "ReadOnly" + case WriteOnly: + return "WriteOnly" + case ReadWrite: + return "ReadWrite" + case OpenFlagsModeMask: + return "OpenFlagsModeMask" + default: + return fmt.Sprintf("unknown (%#x)", uint32(o)) + } +} + +// XattrFlags are flags set on a setxattr operation. +type XattrFlags int + +const ( + // XattrCreate set on setxattr requires a pure create, which fails if + // the named attribute already exists. + XattrCreate XattrFlags = 1 + + // XattrReplace set on setxattr requires a pure replace, which fails if + // the named attribute does not already exist. + XattrReplace XattrFlags = 2 +) + +// tag is a message tag. +type tag uint16 + +// fid is a file identifier. +type fid uint64 + +// FileMode are flags corresponding to file modes. +// +// These correspond to bits sent over the wire. +// These also correspond to mode_t bits. +type FileMode uint32 + +const ( + // FileModeMask is a mask of all the file mode bits of FileMode. + FileModeMask FileMode = 0170000 + + // ModeSocket is an (unused) mode bit for a socket. + ModeSocket FileMode = 0140000 + + // ModeSymlink is a mode bit for a symlink. + ModeSymlink FileMode = 0120000 + + // ModeRegular is a mode bit for regular files. + ModeRegular FileMode = 0100000 + + // ModeBlockDevice is a mode bit for block devices. + ModeBlockDevice FileMode = 060000 + + // ModeDirectory is a mode bit for directories. + ModeDirectory FileMode = 040000 + + // ModeCharacterDevice is a mode bit for a character device. + ModeCharacterDevice FileMode = 020000 + + // ModeNamedPipe is a mode bit for a named pipe. + ModeNamedPipe FileMode = 010000 + + // Read is a mode bit indicating read permission. + Read FileMode = 04 + + // Write is a mode bit indicating write permission. + Write FileMode = 02 + + // Exec is a mode bit indicating exec permission. 
+ Exec FileMode = 01 + + // AllPermissions is a mask with rwx bits set for user, group and others. + AllPermissions FileMode = 0777 + + // Sticky is a mode bit indicating sticky directories. + Sticky FileMode = 01000 + + // permissionsMask is the mask to apply to FileModes for permissions. It + // includes rwx bits for user, group and others, and sticky bit. + permissionsMask FileMode = 01777 +) + +// QIDType is the most significant byte of the FileMode word, to be used as the +// Type field of p9.QID. +func (m FileMode) QIDType() QIDType { + switch { + case m.IsDir(): + return TypeDir + case m.IsSocket(), m.IsNamedPipe(), m.IsCharacterDevice(): + // Best approximation. + return TypeAppendOnly + case m.IsSymlink(): + return TypeSymlink + default: + return TypeRegular + } +} + +// FileType returns the file mode without the permission bits. +func (m FileMode) FileType() FileMode { + return m & FileModeMask +} + +// Permissions returns just the permission bits of the mode. +func (m FileMode) Permissions() FileMode { + return m & permissionsMask +} + +// Writable returns the mode with write bits added. +func (m FileMode) Writable() FileMode { + return m | 0222 +} + +// IsReadable returns true if m represents a file that can be read. +func (m FileMode) IsReadable() bool { + return m&0444 != 0 +} + +// IsWritable returns true if m represents a file that can be written to. +func (m FileMode) IsWritable() bool { + return m&0222 != 0 +} + +// IsExecutable returns true if m represents a file that can be executed. +func (m FileMode) IsExecutable() bool { + return m&0111 != 0 +} + +// IsRegular returns true if m is a regular file. +func (m FileMode) IsRegular() bool { + return m&FileModeMask == ModeRegular +} + +// IsDir returns true if m represents a directory. +func (m FileMode) IsDir() bool { + return m&FileModeMask == ModeDirectory +} + +// IsNamedPipe returns true if m represents a named pipe. 
+func (m FileMode) IsNamedPipe() bool { + return m&FileModeMask == ModeNamedPipe +} + +// IsCharacterDevice returns true if m represents a character device. +func (m FileMode) IsCharacterDevice() bool { + return m&FileModeMask == ModeCharacterDevice +} + +// IsBlockDevice returns true if m represents a character device. +func (m FileMode) IsBlockDevice() bool { + return m&FileModeMask == ModeBlockDevice +} + +// IsSocket returns true if m represents a socket. +func (m FileMode) IsSocket() bool { + return m&FileModeMask == ModeSocket +} + +// IsSymlink returns true if m represents a symlink. +func (m FileMode) IsSymlink() bool { + return m&FileModeMask == ModeSymlink +} + +// ModeFromOS returns a FileMode from an os.FileMode. +func ModeFromOS(mode os.FileMode) FileMode { + m := FileMode(mode.Perm()) + switch { + case mode.IsDir(): + m |= ModeDirectory + case mode&os.ModeSymlink != 0: + m |= ModeSymlink + case mode&os.ModeSocket != 0: + m |= ModeSocket + case mode&os.ModeNamedPipe != 0: + m |= ModeNamedPipe + case mode&os.ModeCharDevice != 0: + m |= ModeCharacterDevice + case mode&os.ModeDevice != 0: + m |= ModeBlockDevice + default: + m |= ModeRegular + } + return m +} + +// OSMode converts a p9.FileMode to an os.FileMode. +func (m FileMode) OSMode() os.FileMode { + var osMode os.FileMode + osMode |= os.FileMode(m.Permissions()) + switch { + case m.IsDir(): + osMode |= os.ModeDir + case m.IsSymlink(): + osMode |= os.ModeSymlink + case m.IsSocket(): + osMode |= os.ModeSocket + case m.IsNamedPipe(): + osMode |= os.ModeNamedPipe + case m.IsCharacterDevice(): + osMode |= os.ModeCharDevice | os.ModeDevice + case m.IsBlockDevice(): + osMode |= os.ModeDevice + } + return osMode +} + +// UID represents a user ID. +type UID uint32 + +// Ok returns true if uid is not NoUID. +func (uid UID) Ok() bool { + return uid != NoUID +} + +// GID represents a group ID. +type GID uint32 + +// Ok returns true if gid is not NoGID. 
+func (gid GID) Ok() bool { + return gid != NoGID +} + +const ( + // notag is a sentinel used to indicate no valid tag. + noTag tag = math.MaxUint16 + + // Nofid is a sentinel used to indicate no valid fid. + noFID fid = math.MaxUint32 + + // NoUID is a sentinel used to indicate no valid UID. + NoUID UID = math.MaxUint32 + + // NoGID is a sentinel used to indicate no valid GID. + NoGID GID = math.MaxUint32 +) + +// msgType is a type identifier. +type msgType uint8 + +// msgType declarations. +const ( + msgRlerror msgType = 7 + msgTstatfs msgType = 8 + msgRstatfs msgType = 9 + msgTlopen msgType = 12 + msgRlopen msgType = 13 + msgTlcreate msgType = 14 + msgRlcreate msgType = 15 + msgTsymlink msgType = 16 + msgRsymlink msgType = 17 + msgTmknod msgType = 18 + msgRmknod msgType = 19 + msgTrename msgType = 20 + msgRrename msgType = 21 + msgTreadlink msgType = 22 + msgRreadlink msgType = 23 + msgTgetattr msgType = 24 + msgRgetattr msgType = 25 + msgTsetattr msgType = 26 + msgRsetattr msgType = 27 + msgTxattrwalk msgType = 30 + msgRxattrwalk msgType = 31 + msgTxattrcreate msgType = 32 + msgRxattrcreate msgType = 33 + msgTreaddir msgType = 40 + msgRreaddir msgType = 41 + msgTfsync msgType = 50 + msgRfsync msgType = 51 + msgTlock msgType = 52 + msgRlock msgType = 53 + msgTgetlock msgType = 54 + msgRgetlock msgType = 55 + msgTlink msgType = 70 + msgRlink msgType = 71 + msgTmkdir msgType = 72 + msgRmkdir msgType = 73 + msgTrenameat msgType = 74 + msgRrenameat msgType = 75 + msgTunlinkat msgType = 76 + msgRunlinkat msgType = 77 + msgTversion msgType = 100 + msgRversion msgType = 101 + msgTauth msgType = 102 + msgRauth msgType = 103 + msgTattach msgType = 104 + msgRattach msgType = 105 + msgTflush msgType = 108 + msgRflush msgType = 109 + msgTwalk msgType = 110 + msgRwalk msgType = 111 + msgTread msgType = 116 + msgRread msgType = 117 + msgTwrite msgType = 118 + msgRwrite msgType = 119 + msgTclunk msgType = 120 + msgRclunk msgType = 121 + msgTremove msgType = 122 + msgRremove 
msgType = 123 + msgTflushf msgType = 124 + msgRflushf msgType = 125 + msgTwalkgetattr msgType = 126 + msgRwalkgetattr msgType = 127 + msgTucreate msgType = 128 + msgRucreate msgType = 129 + msgTumkdir msgType = 130 + msgRumkdir msgType = 131 + msgTumknod msgType = 132 + msgRumknod msgType = 133 + msgTusymlink msgType = 134 + msgRusymlink msgType = 135 +) + +// QIDType represents the file type for QIDs. +// +// QIDType corresponds to the high 8 bits of a Plan 9 file mode. +type QIDType uint8 + +const ( + // TypeDir represents a directory type. + TypeDir QIDType = 0x80 + + // TypeAppendOnly represents an append only file. + TypeAppendOnly QIDType = 0x40 + + // TypeExclusive represents an exclusive-use file. + TypeExclusive QIDType = 0x20 + + // TypeMount represents a mounted channel. + TypeMount QIDType = 0x10 + + // TypeAuth represents an authentication file. + TypeAuth QIDType = 0x08 + + // TypeTemporary represents a temporary file. + TypeTemporary QIDType = 0x04 + + // TypeSymlink represents a symlink. + TypeSymlink QIDType = 0x02 + + // TypeLink represents a hard link. + TypeLink QIDType = 0x01 + + // TypeRegular represents a regular file. + TypeRegular QIDType = 0x00 +) + +var qidTypeString = map[QIDType]string{ + TypeDir: "QID Type Directory", + TypeAppendOnly: "QID Type Append Only", + TypeExclusive: "QID Type Exclusive", + TypeMount: "QID Type Mount", + TypeAuth: "QID Type Auth", + TypeTemporary: "QID Type Temporary", + TypeSymlink: "QID Type Symlink", + TypeLink: "QID Type Link", + TypeRegular: "QID Type Regular", +} + +func (q QIDType) String() string { + s, ok := qidTypeString[q] + if ok { + return s + } + return fmt.Sprintf("unknown QID type (%#x)", uint8(q)) +} + +// QID is a unique file identifier. +// +// This may be embedded in other requests and responses. +type QID struct { + // Type is the highest order byte of the file mode. + Type QIDType + + // Version is an arbitrary server version number. 
+ Version uint32 + + // Path is a unique server identifier for this path (e.g. inode). + Path uint64 +} + +// String implements fmt.Stringer. +func (q QID) String() string { + return fmt.Sprintf("QID{Type: %d, Version: %d, Path: %d}", q.Type, q.Version, q.Path) +} + +// decode implements encoder.decode. +func (q *QID) decode(b *buffer) { + q.Type = b.ReadQIDType() + q.Version = b.Read32() + q.Path = b.Read64() +} + +// encode implements encoder.encode. +func (q *QID) encode(b *buffer) { + b.WriteQIDType(q.Type) + b.Write32(q.Version) + b.Write64(q.Path) +} + +// QIDGenerator is a simple generator for QIDs that atomically increments Path +// values. +type QIDGenerator struct { + // uids is an ever increasing value that can be atomically incremented + // to provide unique Path values for QIDs. + uids uint64 +} + +// Get returns a new 9P unique ID with a unique Path given a QID type. +// +// While the 9P spec allows Version to be incremented every time the file is +// modified, we currently do not use the Version member for anything. Hence, +// it is set to 0. +func (q *QIDGenerator) Get(t QIDType) QID { + return QID{ + Type: t, + Version: 0, + Path: atomic.AddUint64(&q.uids, 1), + } +} + +// FSStat is used by statfs. +type FSStat struct { + // Type is the filesystem type. + Type uint32 + + // BlockSize is the blocksize. + BlockSize uint32 + + // Blocks is the number of blocks. + Blocks uint64 + + // BlocksFree is the number of free blocks. + BlocksFree uint64 + + // BlocksAvailable is the number of blocks *available*. + BlocksAvailable uint64 + + // Files is the number of files available. + Files uint64 + + // FilesFree is the number of free file nodes. + FilesFree uint64 + + // FSID is the filesystem ID. + FSID uint64 + + // NameLength is the maximum name length. + NameLength uint32 +} + +// decode implements encoder.decode. 
+func (f *FSStat) decode(b *buffer) { + f.Type = b.Read32() + f.BlockSize = b.Read32() + f.Blocks = b.Read64() + f.BlocksFree = b.Read64() + f.BlocksAvailable = b.Read64() + f.Files = b.Read64() + f.FilesFree = b.Read64() + f.FSID = b.Read64() + f.NameLength = b.Read32() +} + +// encode implements encoder.encode. +func (f *FSStat) encode(b *buffer) { + b.Write32(f.Type) + b.Write32(f.BlockSize) + b.Write64(f.Blocks) + b.Write64(f.BlocksFree) + b.Write64(f.BlocksAvailable) + b.Write64(f.Files) + b.Write64(f.FilesFree) + b.Write64(f.FSID) + b.Write32(f.NameLength) +} + +// AttrMask is a mask of attributes for getattr. +type AttrMask struct { + Mode bool + NLink bool + UID bool + GID bool + RDev bool + ATime bool + MTime bool + CTime bool + INo bool + Size bool + Blocks bool + BTime bool + Gen bool + DataVersion bool +} + +// Contains returns true if a contains all of the attributes masked as b. +func (a AttrMask) Contains(b AttrMask) bool { + if b.Mode && !a.Mode { + return false + } + if b.NLink && !a.NLink { + return false + } + if b.UID && !a.UID { + return false + } + if b.GID && !a.GID { + return false + } + if b.RDev && !a.RDev { + return false + } + if b.ATime && !a.ATime { + return false + } + if b.MTime && !a.MTime { + return false + } + if b.CTime && !a.CTime { + return false + } + if b.INo && !a.INo { + return false + } + if b.Size && !a.Size { + return false + } + if b.Blocks && !a.Blocks { + return false + } + if b.BTime && !a.BTime { + return false + } + if b.Gen && !a.Gen { + return false + } + if b.DataVersion && !a.DataVersion { + return false + } + return true +} + +// Empty returns true if no fields are masked. +func (a AttrMask) Empty() bool { + return !a.Mode && !a.NLink && !a.UID && !a.GID && !a.RDev && !a.ATime && !a.MTime && !a.CTime && !a.INo && !a.Size && !a.Blocks && !a.BTime && !a.Gen && !a.DataVersion +} + +// AttrMaskAll is an AttrMask with all fields masked. 
+var AttrMaskAll = AttrMask{ + Mode: true, + NLink: true, + UID: true, + GID: true, + RDev: true, + ATime: true, + MTime: true, + CTime: true, + INo: true, + Size: true, + Blocks: true, + BTime: true, + Gen: true, + DataVersion: true, +} + +// String implements fmt.Stringer. +func (a AttrMask) String() string { + var masks []string + if a.Mode { + masks = append(masks, "Mode") + } + if a.NLink { + masks = append(masks, "NLink") + } + if a.UID { + masks = append(masks, "UID") + } + if a.GID { + masks = append(masks, "GID") + } + if a.RDev { + masks = append(masks, "RDev") + } + if a.ATime { + masks = append(masks, "ATime") + } + if a.MTime { + masks = append(masks, "MTime") + } + if a.CTime { + masks = append(masks, "CTime") + } + if a.INo { + masks = append(masks, "INo") + } + if a.Size { + masks = append(masks, "Size") + } + if a.Blocks { + masks = append(masks, "Blocks") + } + if a.BTime { + masks = append(masks, "BTime") + } + if a.Gen { + masks = append(masks, "Gen") + } + if a.DataVersion { + masks = append(masks, "DataVersion") + } + return fmt.Sprintf("AttrMask{with: %s}", strings.Join(masks, " ")) +} + +// decode implements encoder.decode. +func (a *AttrMask) decode(b *buffer) { + mask := b.Read64() + a.Mode = mask&0x00000001 != 0 + a.NLink = mask&0x00000002 != 0 + a.UID = mask&0x00000004 != 0 + a.GID = mask&0x00000008 != 0 + a.RDev = mask&0x00000010 != 0 + a.ATime = mask&0x00000020 != 0 + a.MTime = mask&0x00000040 != 0 + a.CTime = mask&0x00000080 != 0 + a.INo = mask&0x00000100 != 0 + a.Size = mask&0x00000200 != 0 + a.Blocks = mask&0x00000400 != 0 + a.BTime = mask&0x00000800 != 0 + a.Gen = mask&0x00001000 != 0 + a.DataVersion = mask&0x00002000 != 0 +} + +// encode implements encoder.encode. 
+func (a *AttrMask) encode(b *buffer) { + var mask uint64 + if a.Mode { + mask |= 0x00000001 + } + if a.NLink { + mask |= 0x00000002 + } + if a.UID { + mask |= 0x00000004 + } + if a.GID { + mask |= 0x00000008 + } + if a.RDev { + mask |= 0x00000010 + } + if a.ATime { + mask |= 0x00000020 + } + if a.MTime { + mask |= 0x00000040 + } + if a.CTime { + mask |= 0x00000080 + } + if a.INo { + mask |= 0x00000100 + } + if a.Size { + mask |= 0x00000200 + } + if a.Blocks { + mask |= 0x00000400 + } + if a.BTime { + mask |= 0x00000800 + } + if a.Gen { + mask |= 0x00001000 + } + if a.DataVersion { + mask |= 0x00002000 + } + b.Write64(mask) +} + +// NLink is the number of links to this fs object. +// +// While this type has no utilities, it is useful in order to force linux+amd64 +// only developers to cast to NLink for the NLink field, which will make their +// code compatible with other GOARCH and GOOS values. +type NLink uint64 + +// Dev is the device number of an fs object. +// +// While this type has no utilities, it is useful in order to force linux+amd64 +// only developers to cast to Dev for the Dev field, which will make their +// code compatible with other GOARCH and GOOS values. +type Dev uint64 + +// Attr is a set of attributes for getattr. +type Attr struct { + Mode FileMode + UID UID + GID GID + NLink NLink + RDev Dev + Size uint64 + BlockSize uint64 + Blocks uint64 + ATimeSeconds uint64 + ATimeNanoSeconds uint64 + MTimeSeconds uint64 + MTimeNanoSeconds uint64 + CTimeSeconds uint64 + CTimeNanoSeconds uint64 + BTimeSeconds uint64 + BTimeNanoSeconds uint64 + Gen uint64 + DataVersion uint64 +} + +// String implements fmt.Stringer. 
+func (a Attr) String() string { + return fmt.Sprintf("Attr{Mode: 0o%o, UID: %d, GID: %d, NLink: %d, RDev: %d, Size: %d, BlockSize: %d, Blocks: %d, ATime: {Sec: %d, NanoSec: %d}, MTime: {Sec: %d, NanoSec: %d}, CTime: {Sec: %d, NanoSec: %d}, BTime: {Sec: %d, NanoSec: %d}, Gen: %d, DataVersion: %d}", + a.Mode, a.UID, a.GID, a.NLink, a.RDev, a.Size, a.BlockSize, a.Blocks, a.ATimeSeconds, a.ATimeNanoSeconds, a.MTimeSeconds, a.MTimeNanoSeconds, a.CTimeSeconds, a.CTimeNanoSeconds, a.BTimeSeconds, a.BTimeNanoSeconds, a.Gen, a.DataVersion) +} + +// Apply applies this to the given Attr. +func (a Attr) WithMask(mask AttrMask) Attr { + var b Attr + if mask.Mode { + b.Mode = a.Mode + } + if mask.NLink { + b.NLink = a.NLink + } + if mask.UID { + b.UID = a.UID + } + if mask.GID { + b.GID = a.GID + } + if mask.RDev { + b.RDev = a.RDev + } + if mask.ATime { + b.ATimeSeconds = a.ATimeSeconds + b.ATimeNanoSeconds = a.ATimeNanoSeconds + } + if mask.MTime { + b.MTimeSeconds = a.MTimeSeconds + b.MTimeNanoSeconds = a.MTimeNanoSeconds + } + if mask.CTime { + b.CTimeSeconds = a.CTimeSeconds + b.CTimeNanoSeconds = a.CTimeNanoSeconds + } + + // Unclear on mask.INo. It corresponds to the inode number, but the + // inode number really is subsumed in the QID's path field normally and + // not accessible via GetAttr anyway. + + if mask.Size { + b.Size = a.Size + } + if mask.Blocks { + b.Blocks = a.Blocks + // I don't know if Size or Blocks fills in BlockSize. + b.BlockSize = a.BlockSize + } + if mask.BTime { + b.BTimeSeconds = a.BTimeSeconds + b.BTimeNanoSeconds = a.BTimeNanoSeconds + } + if mask.Gen { + b.Gen = a.Gen + } + if mask.DataVersion { + b.DataVersion = a.DataVersion + } + return b +} + +// encode implements encoder.encode. 
+func (a *Attr) encode(b *buffer) { + b.WriteFileMode(a.Mode) + b.WriteUID(a.UID) + b.WriteGID(a.GID) + b.Write64(uint64(a.NLink)) + b.Write64(uint64(a.RDev)) + b.Write64(a.Size) + b.Write64(a.BlockSize) + b.Write64(a.Blocks) + b.Write64(a.ATimeSeconds) + b.Write64(a.ATimeNanoSeconds) + b.Write64(a.MTimeSeconds) + b.Write64(a.MTimeNanoSeconds) + b.Write64(a.CTimeSeconds) + b.Write64(a.CTimeNanoSeconds) + b.Write64(a.BTimeSeconds) + b.Write64(a.BTimeNanoSeconds) + b.Write64(a.Gen) + b.Write64(a.DataVersion) +} + +// decode implements encoder.decode. +func (a *Attr) decode(b *buffer) { + a.Mode = b.ReadFileMode() + a.UID = b.ReadUID() + a.GID = b.ReadGID() + a.NLink = NLink(b.Read64()) + a.RDev = Dev(b.Read64()) + a.Size = b.Read64() + a.BlockSize = b.Read64() + a.Blocks = b.Read64() + a.ATimeSeconds = b.Read64() + a.ATimeNanoSeconds = b.Read64() + a.MTimeSeconds = b.Read64() + a.MTimeNanoSeconds = b.Read64() + a.CTimeSeconds = b.Read64() + a.CTimeNanoSeconds = b.Read64() + a.BTimeSeconds = b.Read64() + a.BTimeNanoSeconds = b.Read64() + a.Gen = b.Read64() + a.DataVersion = b.Read64() +} + +// StatToAttr converts a Linux syscall stat structure to an Attr. +func StatToAttr(s *internal.Stat_t, req AttrMask) (Attr, AttrMask) { + attr := Attr{ + UID: NoUID, + GID: NoGID, + } + if req.Mode { + // p9.FileMode corresponds to Linux mode_t. 
+ attr.Mode = FileMode(s.Mode) + } + if req.NLink { + attr.NLink = NLink(s.Nlink) + } + if req.UID { + attr.UID = UID(s.Uid) + } + if req.GID { + attr.GID = GID(s.Gid) + } + if req.RDev { + attr.RDev = Dev(s.Dev) + } + if req.ATime { + attr.ATimeSeconds = uint64(s.Atim.Sec) + attr.ATimeNanoSeconds = uint64(s.Atim.Nsec) + } + if req.MTime { + attr.MTimeSeconds = uint64(s.Mtim.Sec) + attr.MTimeNanoSeconds = uint64(s.Mtim.Nsec) + } + if req.CTime { + attr.CTimeSeconds = uint64(s.Ctim.Sec) + attr.CTimeNanoSeconds = uint64(s.Ctim.Nsec) + } + if req.Size { + attr.Size = uint64(s.Size) + } + if req.Blocks { + attr.BlockSize = uint64(s.Blksize) + attr.Blocks = uint64(s.Blocks) + } + + // Use the req field because we already have it. + req.BTime = false + req.Gen = false + req.DataVersion = false + + return attr, req +} + +// SetAttrMask specifies a valid mask for setattr. +type SetAttrMask struct { + Permissions bool + UID bool + GID bool + Size bool + ATime bool + MTime bool + CTime bool + ATimeNotSystemTime bool + MTimeNotSystemTime bool +} + +// IsSubsetOf returns whether s is a subset of m. +func (s SetAttrMask) IsSubsetOf(m SetAttrMask) bool { + sb := s.bitmask() + sm := m.bitmask() + return sm|sb == sm +} + +// String implements fmt.Stringer. +func (s SetAttrMask) String() string { + var masks []string + if s.Permissions { + masks = append(masks, "Permissions") + } + if s.UID { + masks = append(masks, "UID") + } + if s.GID { + masks = append(masks, "GID") + } + if s.Size { + masks = append(masks, "Size") + } + if s.ATime { + masks = append(masks, "ATime") + } + if s.MTime { + masks = append(masks, "MTime") + } + if s.CTime { + masks = append(masks, "CTime") + } + if s.ATimeNotSystemTime { + masks = append(masks, "ATimeNotSystemTime") + } + if s.MTimeNotSystemTime { + masks = append(masks, "MTimeNotSystemTime") + } + return fmt.Sprintf("SetAttrMask{with: %s}", strings.Join(masks, " ")) +} + +// Empty returns true if no fields are masked. 
+func (s SetAttrMask) Empty() bool { + return !s.Permissions && !s.UID && !s.GID && !s.Size && !s.ATime && !s.MTime && !s.CTime && !s.ATimeNotSystemTime && !s.MTimeNotSystemTime +} + +// decode implements encoder.decode. +func (s *SetAttrMask) decode(b *buffer) { + mask := b.Read32() + s.Permissions = mask&0x00000001 != 0 + s.UID = mask&0x00000002 != 0 + s.GID = mask&0x00000004 != 0 + s.Size = mask&0x00000008 != 0 + s.ATime = mask&0x00000010 != 0 + s.MTime = mask&0x00000020 != 0 + s.CTime = mask&0x00000040 != 0 + s.ATimeNotSystemTime = mask&0x00000080 != 0 + s.MTimeNotSystemTime = mask&0x00000100 != 0 +} + +func (s SetAttrMask) bitmask() uint32 { + var mask uint32 + if s.Permissions { + mask |= 0x00000001 + } + if s.UID { + mask |= 0x00000002 + } + if s.GID { + mask |= 0x00000004 + } + if s.Size { + mask |= 0x00000008 + } + if s.ATime { + mask |= 0x00000010 + } + if s.MTime { + mask |= 0x00000020 + } + if s.CTime { + mask |= 0x00000040 + } + if s.ATimeNotSystemTime { + mask |= 0x00000080 + } + if s.MTimeNotSystemTime { + mask |= 0x00000100 + } + return mask +} + +// encode implements encoder.encode. +func (s *SetAttrMask) encode(b *buffer) { + b.Write32(s.bitmask()) +} + +// SetAttr specifies a set of attributes for a setattr. +type SetAttr struct { + Permissions FileMode + UID UID + GID GID + Size uint64 + ATimeSeconds uint64 + ATimeNanoSeconds uint64 + MTimeSeconds uint64 + MTimeNanoSeconds uint64 +} + +// String implements fmt.Stringer. +func (s SetAttr) String() string { + return fmt.Sprintf("SetAttr{Permissions: 0o%o, UID: %d, GID: %d, Size: %d, ATime: {Sec: %d, NanoSec: %d}, MTime: {Sec: %d, NanoSec: %d}}", s.Permissions, s.UID, s.GID, s.Size, s.ATimeSeconds, s.ATimeNanoSeconds, s.MTimeSeconds, s.MTimeNanoSeconds) +} + +// decode implements encoder.decode. 
+func (s *SetAttr) decode(b *buffer) { + s.Permissions = b.ReadPermissions() + s.UID = b.ReadUID() + s.GID = b.ReadGID() + s.Size = b.Read64() + s.ATimeSeconds = b.Read64() + s.ATimeNanoSeconds = b.Read64() + s.MTimeSeconds = b.Read64() + s.MTimeNanoSeconds = b.Read64() +} + +// encode implements encoder.encode. +func (s *SetAttr) encode(b *buffer) { + b.WritePermissions(s.Permissions) + b.WriteUID(s.UID) + b.WriteGID(s.GID) + b.Write64(s.Size) + b.Write64(s.ATimeSeconds) + b.Write64(s.ATimeNanoSeconds) + b.Write64(s.MTimeSeconds) + b.Write64(s.MTimeNanoSeconds) +} + +// Apply applies this to the given Attr. +func (a *Attr) Apply(mask SetAttrMask, attr SetAttr) { + if mask.Permissions { + a.Mode = a.Mode&^permissionsMask | (attr.Permissions & permissionsMask) + } + if mask.UID { + a.UID = attr.UID + } + if mask.GID { + a.GID = attr.GID + } + if mask.Size { + a.Size = attr.Size + } + if mask.ATime { + a.ATimeSeconds = attr.ATimeSeconds + a.ATimeNanoSeconds = attr.ATimeNanoSeconds + } + if mask.MTime { + a.MTimeSeconds = attr.MTimeSeconds + a.MTimeNanoSeconds = attr.MTimeNanoSeconds + } +} + +// Dirents is a collection of directory entries. +type Dirents []Dirent + +// Find returns a Dirent with the given name if it exists, or nil. +func (d Dirents) Find(name string) *Dirent { + for _, dir := range d { + if dir.Name == name { + return &dir + } + } + return nil +} + +// Dirent represents a directory entry in File.Readdir. +type Dirent struct { + // QID is the entry QID. + QID QID + + // Offset is the offset in the directory. + // + // This will be communicated back the original caller. + Offset uint64 + + // Type is the 9P type. + Type QIDType + + // Name is the name of the entry (i.e. basename). + Name string +} + +// String implements fmt.Stringer. +func (d Dirent) String() string { + return fmt.Sprintf("Dirent{QID: %d, Offset: %d, Type: 0x%X, Name: %s}", d.QID, d.Offset, d.Type, d.Name) +} + +// decode implements encoder.decode. 
+func (d *Dirent) decode(b *buffer) { + d.QID.decode(b) + d.Offset = b.Read64() + d.Type = b.ReadQIDType() + d.Name = b.ReadString() +} + +// encode implements encoder.encode. +func (d *Dirent) encode(b *buffer) { + d.QID.encode(b) + b.Write64(d.Offset) + b.WriteQIDType(d.Type) + b.WriteString(d.Name) +} diff --git a/vendor/github.com/hugelgupf/p9/p9/path_tree.go b/vendor/github.com/hugelgupf/p9/p9/path_tree.go new file mode 100644 index 000000000..745de5138 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/path_tree.go @@ -0,0 +1,238 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "fmt" + "sync" +) + +// pathNode is a single node in a path traversal. +// +// These are shared by all fidRefs that point to the same path. +// +// Lock ordering: +// +// opMu +// childMu +// +// Two different pathNodes may only be locked if Server.renameMu is held for +// write, in which case they can be acquired in any order. +type pathNode struct { + // opMu synchronizes high-level, sematic operations, such as the + // simultaneous creation and deletion of a file. + opMu sync.RWMutex + + // deleted indicates that the backing file has been deleted. We stop many + // operations at the API level if they are incompatible with a file that has + // already been unlinked. deleted is protected by opMu. 
However, it may be + // changed without opMu if this node is deleted as part of an entire subtree + // on unlink. So deleted must only be accessed/mutated using atomics. + deleted uint32 + + // childMu protects the fields below. + childMu sync.RWMutex + + // childNodes maps child path component names to their pathNode. + childNodes map[string]*pathNode + + // childRefs maps child path component names to all of the their + // references. + childRefs map[string]map[*fidRef]struct{} + + // childRefNames maps child references back to their path component + // name. + childRefNames map[*fidRef]string +} + +func newPathNode() *pathNode { + return &pathNode{ + childNodes: make(map[string]*pathNode), + childRefs: make(map[string]map[*fidRef]struct{}), + childRefNames: make(map[*fidRef]string), + } +} + +// forEachChildRef calls fn for each child reference. +func (p *pathNode) forEachChildRef(fn func(ref *fidRef, name string)) { + p.childMu.RLock() + defer p.childMu.RUnlock() + + for name, m := range p.childRefs { + for ref := range m { + fn(ref, name) + } + } +} + +// forEachChildNode calls fn for each child pathNode. +func (p *pathNode) forEachChildNode(fn func(pn *pathNode)) { + p.childMu.RLock() + defer p.childMu.RUnlock() + + for _, pn := range p.childNodes { + fn(pn) + } +} + +// pathNodeFor returns the path node for the given name, or a new one. +func (p *pathNode) pathNodeFor(name string) *pathNode { + p.childMu.RLock() + // Fast path, node already exists. + if pn, ok := p.childNodes[name]; ok { + p.childMu.RUnlock() + return pn + } + p.childMu.RUnlock() + + // Slow path, create a new pathNode for shared use. + p.childMu.Lock() + + // Re-check after re-lock. + if pn, ok := p.childNodes[name]; ok { + p.childMu.Unlock() + return pn + } + + pn := newPathNode() + p.childNodes[name] = pn + p.childMu.Unlock() + return pn +} + +// nameFor returns the name for the given fidRef. +// +// Precondition: addChild is called for ref before nameFor. 
+func (p *pathNode) nameFor(ref *fidRef) string { + p.childMu.RLock() + n, ok := p.childRefNames[ref] + p.childMu.RUnlock() + + if !ok { + // This should not happen, don't proceed. + panic(fmt.Sprintf("expected name for %+v, none found", ref)) + } + + return n +} + +// addChildLocked adds a child reference to p. +// +// Precondition: As addChild, plus childMu is locked for write. +func (p *pathNode) addChildLocked(ref *fidRef, name string) { + if n, ok := p.childRefNames[ref]; ok { + // This should not happen, don't proceed. + panic(fmt.Sprintf("unexpected fidRef %+v with path %q, wanted %q", ref, n, name)) + } + + p.childRefNames[ref] = name + + m, ok := p.childRefs[name] + if !ok { + m = make(map[*fidRef]struct{}) + p.childRefs[name] = m + } + + m[ref] = struct{}{} +} + +// addChild adds a child reference to p. +// +// Precondition: ref may only be added once at a time. +func (p *pathNode) addChild(ref *fidRef, name string) { + p.childMu.Lock() + p.addChildLocked(ref, name) + p.childMu.Unlock() +} + +// removeChild removes the given child. +// +// This applies only to an individual fidRef, which is not required to exist. +func (p *pathNode) removeChild(ref *fidRef) { + p.childMu.Lock() + + // This ref may not exist anymore. This can occur, e.g., in unlink, + // where a removeWithName removes the ref, and then a DecRef on the ref + // attempts to remove again. + if name, ok := p.childRefNames[ref]; ok { + m, ok := p.childRefs[name] + if !ok { + // This should not happen, don't proceed. + p.childMu.Unlock() + panic(fmt.Sprintf("name %s missing from childfidRefs", name)) + } + + delete(m, ref) + if len(m) == 0 { + delete(p.childRefs, name) + } + } + + delete(p.childRefNames, ref) + + p.childMu.Unlock() +} + +// addPathNodeFor adds an existing pathNode as the node for name. +// +// Preconditions: newName does not exist. 
+func (p *pathNode) addPathNodeFor(name string, pn *pathNode) { + p.childMu.Lock() + + if opn, ok := p.childNodes[name]; ok { + p.childMu.Unlock() + panic(fmt.Sprintf("unexpected pathNode %+v with path %q", opn, name)) + } + + p.childNodes[name] = pn + p.childMu.Unlock() +} + +// removeWithName removes all references with the given name. +// +// The provided function is executed after reference removal. The only method +// it may (transitively) call on this pathNode is addChildLocked. +// +// If a child pathNode for name exists, it is removed from this pathNode and +// returned by this function. Any operations on the removed tree must use this +// value. +func (p *pathNode) removeWithName(name string, fn func(ref *fidRef)) *pathNode { + p.childMu.Lock() + defer p.childMu.Unlock() + + if m, ok := p.childRefs[name]; ok { + for ref := range m { + delete(m, ref) + delete(p.childRefNames, ref) + if fn == nil { + continue + } + + // Attempt to hold a reference while calling fn() to + // prevent concurrent destruction of the child, which + // can lead to data races. If the child has already + // been destroyed, then we can skip the callback. + if ref.TryIncRef() { + fn(ref) + ref.DecRef() + } + } + } + + // Return the original path node, if it exists. + origPathNode := p.childNodes[name] + delete(p.childNodes, name) + return origPathNode +} diff --git a/vendor/github.com/hugelgupf/p9/p9/pool.go b/vendor/github.com/hugelgupf/p9/p9/pool.go new file mode 100644 index 000000000..0aa274f11 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/pool.go @@ -0,0 +1,65 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "sync" +) + +// pool is a simple allocator. +// +// It is used for both tags and FIDs. +type pool struct { + mu sync.Mutex + + // cache is the set of returned values. + cache []uint64 + + // start is the starting value (if needed). + start uint64 + + // limit is the upper limit. + limit uint64 +} + +// Get gets a value from the pool. +func (p *pool) Get() (uint64, bool) { + p.mu.Lock() + defer p.mu.Unlock() + + // Anything cached? + if len(p.cache) > 0 { + v := p.cache[len(p.cache)-1] + p.cache = p.cache[:len(p.cache)-1] + return v, true + } + + // Over the limit? + if p.start == p.limit { + return 0, false + } + + // Generate a new value. + v := p.start + p.start++ + return v, true +} + +// Put returns a value to the pool. +func (p *pool) Put(v uint64) { + p.mu.Lock() + p.cache = append(p.cache, v) + p.mu.Unlock() +} diff --git a/vendor/github.com/hugelgupf/p9/p9/server.go b/vendor/github.com/hugelgupf/p9/p9/server.go new file mode 100644 index 000000000..c6e9ff365 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/server.go @@ -0,0 +1,681 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "runtime/debug" + "strings" + "sync" + "sync/atomic" + + "github.com/hugelgupf/p9/linux" + "github.com/u-root/uio/ulog" +) + +// Server is a 9p2000.L server. +type Server struct { + // attacher provides the attach function. + attacher Attacher + + // pathTree is the full set of paths opened on this server. + // + // These may be across different connections, but rename operations + // must be serialized globally for safely. There is a single pathTree + // for the entire server, and not per connection. + pathTree *pathNode + + // renameMu is a global lock protecting rename operations. With this + // lock, we can be certain that any given rename operation can safely + // acquire two path nodes in any order, as all other concurrent + // operations acquire at most a single node. + renameMu sync.RWMutex + + // log is a logger to log to, if specified. + log ulog.Logger +} + +// ServerOpt is an optional config for a new server. +type ServerOpt func(s *Server) + +// WithServerLogger overrides the default logger for the server. +func WithServerLogger(l ulog.Logger) ServerOpt { + return func(s *Server) { + s.log = l + } +} + +// NewServer returns a new server. +func NewServer(attacher Attacher, o ...ServerOpt) *Server { + s := &Server{ + attacher: attacher, + pathTree: newPathNode(), + log: ulog.Null, + } + for _, opt := range o { + opt(s) + } + return s +} + +// connState is the state for a single connection. +type connState struct { + // server is the backing server. 
+ server *Server + + // fids is the set of active fids. + // + // This is used to find fids for files. + fidMu sync.Mutex + fids map[fid]*fidRef + + // tags is the set of active tags. + // + // The given channel is closed when the + // tag is finished with processing. + tagMu sync.Mutex + tags map[tag]chan struct{} + + // messageSize is the maximum message size. The server does not + // do automatic splitting of messages. + messageSize uint32 + readBufPool sync.Pool + pristineZeros []byte + + // baseVersion is the version of 9P protocol. + baseVersion baseVersion + + // version is the agreed upon version X of 9P2000.L.Google.X. + // version 0 implies 9P2000.L. + version uint32 + + // pendingWg counts requests that are still being handled. + pendingWg sync.WaitGroup + + // recvMu serializes receiving from t. + recvMu sync.Mutex + + // recvIdle is the number of goroutines in handleRequests() attempting to + // lock recvMu so that they can receive from t. recvIdle is accessed + // using atomic memory operations. + recvIdle int32 + + // If recvShutdown is true, at least one goroutine has observed a + // connection error while receiving from t, and all goroutines in + // handleRequests() should exit immediately. recvShutdown is protected + // by recvMu. + recvShutdown bool + + // sendMu serializes sending to r. + sendMu sync.Mutex + + // t reads T messages and r write R messages + t io.ReadCloser + r io.WriteCloser +} + +// xattrOp is the xattr related operations, walk or create. +type xattrOp int + +const ( + xattrNone = 0 + xattrCreate = 1 + xattrWalk = 2 +) + +type pendingXattr struct { + // the pending xattr-related operation + op xattrOp + + // name is the attribute. + name string + + // size of the attribute value, represents the + // length of the attribute value that is going to write to or read from a file. + size uint64 + + // flags associated with a txattrcreate message. + // generally Linux setxattr(2) flags. 
+ flags XattrFlags + + // saved up xattr operation value (for reads, listed / gotten buffer -- + // ready for chunking; for writes, this is used to accumulate chunked + // values until a Tclunk actuates the operation) + buf []byte +} + +// fidRef wraps a node and tracks references. +type fidRef struct { + // server is the associated server. + server *Server + + // file is the associated File. + file File + + // pendingXattr is the xattr-related operations that are going to be done + // in a tread or twrite request. + pendingXattr pendingXattr + + // refs is an active refence count. + // + // The node above will be closed only when refs reaches zero. + refs int64 + + // opened indicates whether this has been opened already. + // + // This is updated in handlers.go. + // + // opened is protected by pathNode.opMu or renameMu (for write). + opened bool + + // mode is the fidRef's mode from the walk. Only the type bits are + // valid, the permissions may change. This is used to sanity check + // operations on this element, and prevent walks across + // non-directories. + mode FileMode + + // openFlags is the mode used in the open. + // + // This is updated in handlers.go. + // + // opened is protected by pathNode.opMu or renameMu (for write). + openFlags OpenFlags + + // pathNode is the current pathNode for this fid. + pathNode *pathNode + + // parent is the parent fidRef. We hold on to a parent reference to + // ensure that hooks, such as Renamed, can be executed safely by the + // server code. + // + // Note that parent cannot be changed without holding both the global + // rename lock and a writable lock on the associated pathNode for this + // fidRef. Holding either of these locks is sufficient to examine + // parent safely. + // + // The parent will be nil for root fidRefs, and non-nil otherwise. The + // method maybeParent can be used to return a cyclical reference, and + // isRoot should be used to check for root over looking at parent + // directly. 
+ parent *fidRef +} + +// IncRef increases the references on a fid. +func (f *fidRef) IncRef() { + atomic.AddInt64(&f.refs, 1) +} + +// DecRef should be called when you're finished with a fid. +func (f *fidRef) DecRef() error { + if atomic.AddInt64(&f.refs, -1) == 0 { + var ( + errs []error + err = f.file.Close() + ) + if err != nil { + err = fmt.Errorf("file: %w", err) + errs = append(errs, err) + } + + // Drop the parent reference. + // + // Since this fidRef is guaranteed to be non-discoverable when + // the references reach zero, we don't need to worry about + // clearing the parent. + if f.parent != nil { + // If we've been previously deleted, removing this + // ref is a no-op. That's expected. + f.parent.pathNode.removeChild(f) + if pErr := f.parent.DecRef(); pErr != nil { + pErr = fmt.Errorf("parent: %w", pErr) + errs = append(errs, pErr) + } + } + return errors.Join(errs...) + } + return nil +} + +// TryIncRef returns true if a new reference is taken on the fid, and false if +// the fid has been destroyed. +func (f *fidRef) TryIncRef() bool { + for { + r := atomic.LoadInt64(&f.refs) + if r <= 0 { + return false + } + if atomic.CompareAndSwapInt64(&f.refs, r, r+1) { + return true + } + } +} + +// isDeleted returns true if this fidRef has been deleted. +// +// Precondition: this must be called via safelyRead, safelyWrite or +// safelyGlobal. +func (f *fidRef) isDeleted() bool { + return atomic.LoadUint32(&f.pathNode.deleted) != 0 +} + +// isRoot indicates whether this is a root fid. +func (f *fidRef) isRoot() bool { + return f.parent == nil +} + +// maybeParent returns a cyclic reference for roots, and the parent otherwise. +func (f *fidRef) maybeParent() *fidRef { + if f.parent != nil { + return f.parent + } + return f // Root has itself. +} + +// notifyDelete marks all fidRefs as deleted. +// +// Precondition: this must be called via safelyWrite or safelyGlobal. 
+func notifyDelete(pn *pathNode) { + atomic.StoreUint32(&pn.deleted, 1) + + // Call on all subtrees. + pn.forEachChildNode(func(pn *pathNode) { + notifyDelete(pn) + }) +} + +// markChildDeleted marks all children below the given name as deleted. +// +// Precondition: this must be called via safelyWrite or safelyGlobal. +func (f *fidRef) markChildDeleted(name string) { + if origPathNode := f.pathNode.removeWithName(name, nil); origPathNode != nil { + // Mark all children as deleted. + notifyDelete(origPathNode) + } +} + +// notifyNameChange calls the relevant Renamed method on all nodes in the path, +// recursively. Note that this applies only for subtrees, as these +// notifications do not apply to the actual file whose name has changed. +// +// Precondition: this must be called via safelyGlobal. +func notifyNameChange(pn *pathNode) { + // Call on all local references. + pn.forEachChildRef(func(ref *fidRef, name string) { + ref.file.Renamed(ref.parent.file, name) + }) + + // Call on all subtrees. + pn.forEachChildNode(func(pn *pathNode) { + notifyNameChange(pn) + }) +} + +// renameChildTo renames the given child to the target. +// +// Precondition: this must be called via safelyGlobal. +func (f *fidRef) renameChildTo(oldName string, target *fidRef, newName string) { + target.markChildDeleted(newName) + origPathNode := f.pathNode.removeWithName(oldName, func(ref *fidRef) { + // N.B. DecRef can take f.pathNode's parent's childMu. This is + // allowed because renameMu is held for write via safelyGlobal. + ref.parent.DecRef() // Drop original reference. + ref.parent = target // Change parent. + ref.parent.IncRef() // Acquire new one. + if f.pathNode == target.pathNode { + target.pathNode.addChildLocked(ref, newName) + } else { + target.pathNode.addChild(ref, newName) + } + ref.file.Renamed(target.file, newName) + }) + + if origPathNode != nil { + // Replace the previous (now deleted) path node. 
+ target.pathNode.addPathNodeFor(newName, origPathNode) + // Call Renamed on all children. + notifyNameChange(origPathNode) + } +} + +// safelyRead executes the given operation with the local path node locked. +// This implies that paths will not change during the operation. +func (f *fidRef) safelyRead(fn func() error) (err error) { + f.server.renameMu.RLock() + defer f.server.renameMu.RUnlock() + f.pathNode.opMu.RLock() + defer f.pathNode.opMu.RUnlock() + return fn() +} + +// safelyWrite executes the given operation with the local path node locked in +// a writable fashion. This implies some paths may change. +func (f *fidRef) safelyWrite(fn func() error) (err error) { + f.server.renameMu.RLock() + defer f.server.renameMu.RUnlock() + f.pathNode.opMu.Lock() + defer f.pathNode.opMu.Unlock() + return fn() +} + +// safelyGlobal executes the given operation with the global path lock held. +func (f *fidRef) safelyGlobal(fn func() error) (err error) { + f.server.renameMu.Lock() + defer f.server.renameMu.Unlock() + return fn() +} + +// Lookupfid finds the given fid. +// +// You should call fid.DecRef when you are finished using the fid. +func (cs *connState) LookupFID(fid fid) (*fidRef, bool) { + cs.fidMu.Lock() + defer cs.fidMu.Unlock() + fidRef, ok := cs.fids[fid] + if ok { + fidRef.IncRef() + return fidRef, true + } + return nil, false +} + +// Insertfid installs the given fid. +// +// This fid starts with a reference count of one. If a fid exists in +// the slot already it is closed, per the specification. +func (cs *connState) InsertFID(fid fid, newRef *fidRef) { + cs.fidMu.Lock() + defer cs.fidMu.Unlock() + origRef, ok := cs.fids[fid] + if ok { + defer origRef.DecRef() + } + newRef.IncRef() + cs.fids[fid] = newRef +} + +// Deletefid removes the given fid. +// +// This simply removes it from the map and drops a reference. 
+func (cs *connState) DeleteFID(fid fid) error { + cs.fidMu.Lock() + defer cs.fidMu.Unlock() + fidRef, ok := cs.fids[fid] + if !ok { + return linux.EBADF + } + delete(cs.fids, fid) + return fidRef.DecRef() +} + +// StartTag starts handling the tag. +// +// False is returned if this tag is already active. +func (cs *connState) StartTag(t tag) bool { + cs.tagMu.Lock() + defer cs.tagMu.Unlock() + _, ok := cs.tags[t] + if ok { + return false + } + cs.tags[t] = make(chan struct{}) + return true +} + +// ClearTag finishes handling a tag. +func (cs *connState) ClearTag(t tag) { + cs.tagMu.Lock() + defer cs.tagMu.Unlock() + ch, ok := cs.tags[t] + if !ok { + // Should never happen. + panic("unused tag cleared") + } + delete(cs.tags, t) + + // Notify. + close(ch) +} + +// Waittag waits for a tag to finish. +func (cs *connState) WaitTag(t tag) { + cs.tagMu.Lock() + ch, ok := cs.tags[t] + cs.tagMu.Unlock() + if !ok { + return + } + + // Wait for close. + <-ch +} + +// handleRequest handles a single request. +// +// The recvDone channel is signaled when recv is done (with a error if +// necessary). The sendDone channel is signaled with the result of the send. +func (cs *connState) handleRequest() bool { + cs.pendingWg.Add(1) + defer cs.pendingWg.Done() + + // Obtain the right to receive a message from cs.t. + atomic.AddInt32(&cs.recvIdle, 1) + cs.recvMu.Lock() + atomic.AddInt32(&cs.recvIdle, -1) + + if cs.recvShutdown { + // Another goroutine already detected a connection problem; exit + // immediately. + cs.recvMu.Unlock() + return false + } + + messageSize := atomic.LoadUint32(&cs.messageSize) + if messageSize == 0 { + // Default or not yet negotiated. + messageSize = maximumLength + } + + // Receive a message. + tag, m, err := recv(cs.server.log, cs.t, messageSize, msgDotLRegistry.get) + if errSocket, ok := err.(ConnError); ok { + if errSocket.error != io.EOF { + // Connection problem; stop serving. 
+ cs.server.log.Printf("p9.recv: %v", errSocket.error) + } + cs.recvShutdown = true + cs.recvMu.Unlock() + return false + } + + // Ensure that another goroutine is available to receive from cs.t. + if atomic.LoadInt32(&cs.recvIdle) == 0 { + go cs.handleRequests() // S/R-SAFE: Irrelevant. + } + cs.recvMu.Unlock() + + // Deal with other errors. + if err != nil && err != io.EOF { + // If it's not a connection error, but some other protocol error, + // we can send a response immediately. + cs.sendMu.Lock() + err := send(cs.server.log, cs.r, tag, newErr(err)) + cs.sendMu.Unlock() + if err != nil { + cs.server.log.Printf("p9.send: %v", err) + } + return true + } + + // Try to start the tag. + if !cs.StartTag(tag) { + cs.server.log.Printf("no valid tag [%05d]", tag) + // Nothing we can do at this point; client is bogus. + return true + } + + // Handle the message. + r := cs.handle(m) + + // Clear the tag before sending. That's because as soon as this + // hits the wire, the client can legally send another message + // with the same tag. + cs.ClearTag(tag) + + // Send back the result. + cs.sendMu.Lock() + err = send(cs.server.log, cs.r, tag, r) + cs.sendMu.Unlock() + if err != nil { + cs.server.log.Printf("p9.send: %v", err) + } + + msgDotLRegistry.put(m) + m = nil // 'm' should not be touched after this point. + return true +} + +func (cs *connState) handle(m message) (r message) { + defer func() { + if r == nil { + // Don't allow a panic to propagate. + err := recover() + + // Include a useful log message. + cs.server.log.Printf("panic in handler - %v: %s", err, debug.Stack()) + + // Wrap in an EFAULT error; we don't really have a + // better way to describe this kind of error. It will + // usually manifest as a result of the test framework. + r = newErr(linux.EFAULT) + } + }() + + if handler, ok := m.(handler); ok { + // Call the message handler. + r = handler.handle(cs) + } else { + // Produce an ENOSYS error. 
+ r = newErr(linux.ENOSYS) + } + return +} + +func (cs *connState) handleRequests() { + for { + if !cs.handleRequest() { + return + } + } +} + +func (cs *connState) stop() { + // Wait for completion of all inflight request goroutines.. If a + // request is stuck, something has the opportunity to kill us with + // SIGABRT to get a stack dump of the offending handler. + cs.pendingWg.Wait() + + // Ensure the connection is closed. + cs.r.Close() + cs.t.Close() + + for _, fidRef := range cs.fids { + // Drop final reference in the fid table. Note this should + // always close the file, since we've ensured that there are no + // handlers running via the wait for Pending => 0 below. + fidRef.DecRef() + } +} + +// Handle handles a single connection. +func (s *Server) Handle(t io.ReadCloser, r io.WriteCloser) error { + cs := &connState{ + server: s, + t: t, + r: r, + fids: make(map[fid]*fidRef), + tags: make(map[tag]chan struct{}), + } + defer cs.stop() + + // Serve requests from t in the current goroutine; handleRequests() + // will create more goroutines as needed. + cs.handleRequests() + return nil +} + +func isErrClosing(err error) bool { + return strings.Contains(err.Error(), "use of closed network connection") +} + +// Serve handles requests from the bound socket. +// +// The passed serverSocket _must_ be created in packet mode. +func (s *Server) Serve(serverSocket net.Listener) error { + return s.ServeContext(nil, serverSocket) +} + +var errAlreadyClosed = errors.New("already closed") + +// ServeContext handles requests from the bound socket. +// +// The passed serverSocket _must_ be created in packet mode. +// +// When the context is done, the listener is closed and serve returns once +// every request has been handled. 
+func (s *Server) ServeContext(ctx context.Context, serverSocket net.Listener) error { + var wg sync.WaitGroup + defer wg.Wait() + + var cancelCause context.CancelCauseFunc + if ctx != nil { + ctx, cancelCause = context.WithCancelCause(ctx) + + wg.Add(1) + go func() { + defer wg.Done() + <-ctx.Done() + + // Only close the server socket if it wasn't already closed. + if err := ctx.Err(); errors.Is(err, errAlreadyClosed) { + return + } + serverSocket.Close() + }() + } + + for { + conn, err := serverSocket.Accept() + if err != nil { + if cancelCause != nil { + cancelCause(errAlreadyClosed) + } + if isErrClosing(err) { + return nil + } + // Something went wrong. + return err + } + + wg.Add(1) + go func(conn net.Conn) { // S/R-SAFE: Irrelevant. + s.Handle(conn, conn) + wg.Done() + }(conn) + } +} diff --git a/vendor/github.com/hugelgupf/p9/p9/transport.go b/vendor/github.com/hugelgupf/p9/p9/transport.go new file mode 100644 index 000000000..e0b6947d2 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/transport.go @@ -0,0 +1,245 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "sync" + + "github.com/hugelgupf/p9/vecnet" + "github.com/u-root/uio/ulog" +) + +// ConnError is returned in cases of a connection issue. +// +// This may be treated differently than other errors. +type ConnError struct { + // error is the socket error. 
+ error +} + +func (e ConnError) Error() string { + return fmt.Sprintf("socket error: %v", e.error) +} + +// Is reports whether any error in err's chain matches target. +func (e ConnError) Is(target error) bool { return target == e.error } + +// ErrMessageTooLarge indicates the size was larger than reasonable. +type ErrMessageTooLarge struct { + size uint32 + msize uint32 +} + +// Error returns a sensible error. +func (e *ErrMessageTooLarge) Error() string { + return fmt.Sprintf("message too large for fixed buffer: size is %d, limit is %d", e.size, e.msize) +} + +// ErrNoValidMessage indicates no valid message could be decoded. +var ErrNoValidMessage = errors.New("buffer contained no valid message") + +const ( + // headerLength is the number of bytes required for a header. + headerLength uint32 = 7 + + // maximumLength is the largest possible message. + maximumLength uint32 = 4 * 1024 * 1024 + + // initialBufferLength is the initial data buffer we allocate. + initialBufferLength uint32 = 64 +) + +var dataPool = sync.Pool{ + New: func() interface{} { + // These buffers are used for decoding without a payload. + // We need to return a pointer to avoid unnecessary allocations + // (see https://staticcheck.io/docs/checks#SA6002). + b := make([]byte, initialBufferLength) + return &b + }, +} + +// send sends the given message over the socket. +func send(l ulog.Logger, w io.Writer, tag tag, m message) error { + data := dataPool.Get().(*[]byte) + dataBuf := buffer{data: (*data)[:0]} + + // Encode the message. The buffer will grow automatically. + m.encode(&dataBuf) + + l.Printf("send [w %p] [Tag %06d] %s", w, tag, m) + + // Get our vectors to send. + var hdr [headerLength]byte + vecs := make(net.Buffers, 0, 3) + vecs = append(vecs, hdr[:]) + if len(dataBuf.data) > 0 { + vecs = append(vecs, dataBuf.data) + } + totalLength := headerLength + uint32(len(dataBuf.data)) + + // Is there a payload? 
+ if payloader, ok := m.(payloader); ok { + p := payloader.Payload() + if len(p) > 0 { + vecs = append(vecs, p) + totalLength += uint32(len(p)) + } + defer payloader.PayloadCleanup() + } + + // Construct the header. + headerBuf := buffer{data: hdr[:0]} + headerBuf.Write32(totalLength) + headerBuf.WriteMsgType(m.typ()) + headerBuf.WriteTag(tag) + + if _, err := vecs.WriteTo(w); err != nil { + return ConnError{err} + } + + // All set. + dataPool.Put(&dataBuf.data) + return nil +} + +// lookupTagAndType looks up an existing message or creates a new one. +// +// This is called by recv after decoding the header. Any error returned will be +// propagating back to the caller. You may use messageByType directly as a +// lookupTagAndType function (by design). +type lookupTagAndType func(tag tag, t msgType) (message, error) + +// recv decodes a message from the socket. +// +// This is done in two parts, and is thus not safe for multiple callers. +// +// On a socket error, the special error type ErrSocket is returned. +// +// The tag value NoTag will always be returned if err is non-nil. +func recv(l ulog.Logger, r io.Reader, msize uint32, lookup lookupTagAndType) (tag, message, error) { + // Read a header. + var hdr [headerLength]byte + + if _, err := io.ReadAtLeast(r, hdr[:], int(headerLength)); err != nil { + return noTag, nil, ConnError{err} + } + + // Decode the header. + headerBuf := buffer{data: hdr[:]} + size := headerBuf.Read32() + t := headerBuf.ReadMsgType() + tag := headerBuf.ReadTag() + if size < headerLength { + // The message is too small. + // + // See above: it's probably screwed. + return noTag, nil, ConnError{ErrNoValidMessage} + } + if size > maximumLength || size > msize { + // The message is too big. + return noTag, nil, ConnError{&ErrMessageTooLarge{size, msize}} + } + remaining := size - headerLength + + // Find our message to decode. + m, err := lookup(tag, t) + if err != nil { + // Throw away the contents of this message. 
+ if remaining > 0 { + _, _ = io.Copy(ioutil.Discard, io.LimitReader(r, int64(remaining))) + } + return tag, nil, err + } + + // Not yet initialized. + var dataBuf buffer + var vecs vecnet.Buffers + + appendBuffer := func(size int) *[]byte { + // Pull a data buffer from the pool. + datap := dataPool.Get().(*[]byte) + data := *datap + if size > len(data) { + // Create a larger data buffer. + data = make([]byte, size) + datap = &data + } else { + // Limit the data buffer. + data = data[:size] + } + dataBuf = buffer{data: data} + vecs = append(vecs, data) + return datap + } + + // Read the rest of the payload. + // + // This requires some special care to ensure that the vectors all line + // up the way they should. We do this to minimize copying data around. + if payloader, ok := m.(payloader); ok { + fixedSize := payloader.FixedSize() + + // Do we need more than there is? + if fixedSize > remaining { + // This is not a valid message. + if remaining > 0 { + _, _ = io.Copy(ioutil.Discard, io.LimitReader(r, int64(remaining))) + } + return noTag, nil, ErrNoValidMessage + } + + if fixedSize != 0 { + datap := appendBuffer(int(fixedSize)) + defer dataPool.Put(datap) + } + + // Include the payload. + p := payloader.Payload() + if p == nil || len(p) != int(remaining-fixedSize) { + p = make([]byte, remaining-fixedSize) + payloader.SetPayload(p) + } + if len(p) > 0 { + vecs = append(vecs, p) + } + } else if remaining != 0 { + datap := appendBuffer(int(remaining)) + defer dataPool.Put(datap) + } + + if len(vecs) > 0 { + if _, err := vecs.ReadFrom(r); err != nil { + return noTag, nil, ConnError{err} + } + } + + // Decode the message data. + m.decode(&dataBuf) + if dataBuf.isOverrun() { + // No need to drain the socket. + return noTag, nil, ErrNoValidMessage + } + + l.Printf("recv [r %p] [Tag %06d] %s", r, tag, m) + + // All set. 
+ return tag, m, nil +} diff --git a/vendor/github.com/hugelgupf/p9/p9/version.go b/vendor/github.com/hugelgupf/p9/p9/version.go new file mode 100644 index 000000000..236e1a319 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/p9/version.go @@ -0,0 +1,134 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "fmt" + "strconv" + "strings" +) + +const ( + // highestSupportedVersion is the highest supported version X in a + // version string of the format 9P2000.L.Google.X. + // + // Clients are expected to start requesting this version number and + // to continuously decrement it until a Tversion request succeeds. + highestSupportedVersion uint32 = 7 + + // lowestSupportedVersion is the lowest supported version X in a + // version string of the format 9P2000.L.Google.X. + // + // Clients are free to send a Tversion request at a version below this + // value but are expected to encounter an Rlerror in response. + lowestSupportedVersion uint32 = 0 +) + +type baseVersion string + +const ( + undetermined baseVersion = "" + version9P2000 baseVersion = "9P2000" + version9P2000U baseVersion = "9P2000.u" + version9P2000L baseVersion = "9P2000.L" +) + +// HighestVersionString returns the highest possible version string that a client +// may request or a server may support. 
+func HighestVersionString() string { + return versionString(version9P2000L, highestSupportedVersion) +} + +// parseVersion parses a Tversion version string into a numeric version number +// if the version string is supported by p9. Otherwise returns (0, false). +// +// From Tversion(9P): "Version strings are defined such that, if the client string +// contains one or more period characters, the initial substring up to but not +// including any single period in the version string defines a version of the protocol." +// +// p9 intentionally diverges from this and always requires that the version string +// start with 9P2000.L to express that it is always compatible with 9P2000.L. The +// only supported versions extensions are of the format 9p2000.L.Google.X where X +// is an ever increasing version counter. +// +// Version 9P2000.L.Google.0 implies 9P2000.L. +// +// New versions must always be a strict superset of 9P2000.L. A version increase must +// define a predicate representing the feature extension introduced by that version. The +// predicate must be commented and should take the format: +// +// // VersionSupportsX returns true if version v supports X and must be checked when ... +// +// func VersionSupportsX(v int32) bool { +// ... +// +// ) +func parseVersion(str string) (baseVersion, uint32, bool) { + switch str { + case "9P2000.L": + return version9P2000L, 0, true + case "9P2000.u": + return version9P2000U, 0, true + case "9P2000": + return version9P2000, 0, true + default: + substr := strings.Split(str, ".") + if len(substr) != 4 { + return "", 0, false + } + if substr[0] != "9P2000" || substr[1] != "L" || substr[2] != "Google" || len(substr[3]) == 0 { + return "", 0, false + } + version, err := strconv.ParseUint(substr[3], 10, 32) + if err != nil { + return "", 0, false + } + return version9P2000L, uint32(version), true + } +} + +// versionString formats a p9 version number into a Tversion version string. 
+func versionString(baseVersion baseVersion, version uint32) string { + // Special case the base version so that clients expecting this string + // instead of the 9P2000.L.Google.0 equivalent get it. This is important + // for backwards compatibility with legacy servers that check for exactly + // the baseVersion and allow nothing else. + if version == 0 { + return string(baseVersion) + } + return fmt.Sprintf("9P2000.L.Google.%d", version) +} + +// versionSupportsTwalkgetattr returns true if version v supports the +// Twalkgetattr message. This predicate must be checked by clients before +// attempting to make a Twalkgetattr request. +func versionSupportsTwalkgetattr(v uint32) bool { + return v >= 2 +} + +// versionSupportsTucreation returns true if version v supports the Tucreation +// messages (Tucreate, Tusymlink, Tumkdir, Tumknod). This predicate must be +// checked by clients before attempting to make a Tucreation request. +// If Tucreation messages are not supported, their non-UID supporting +// counterparts (Tlcreate, Tsymlink, Tmkdir, Tmknod) should be used. +func versionSupportsTucreation(v uint32) bool { + return v >= 3 +} + +// VersionSupportsMultiUser returns true if version v supports multi-user fake +// directory permissions and ID values. +func VersionSupportsMultiUser(v uint32) bool { + return v >= 6 +} diff --git a/vendor/github.com/hugelgupf/p9/vecnet/iov32_linux.go b/vendor/github.com/hugelgupf/p9/vecnet/iov32_linux.go new file mode 100644 index 000000000..55a988f8c --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/vecnet/iov32_linux.go @@ -0,0 +1,22 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build 386 || mips || arm || mipsle +// +build 386 mips arm mipsle + +package vecnet + +func iovlen(i int) uint32 { + return uint32(i) +} diff --git a/vendor/github.com/hugelgupf/p9/vecnet/iov_linux.go b/vendor/github.com/hugelgupf/p9/vecnet/iov_linux.go new file mode 100644 index 000000000..5e5796112 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/vecnet/iov_linux.go @@ -0,0 +1,22 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !386 && !arm && !mips && !mipsle +// +build !386,!arm,!mips,!mipsle + +package vecnet + +func iovlen(i int) uint64 { + return uint64(i) +} diff --git a/vendor/github.com/hugelgupf/p9/vecnet/vecnet.go b/vendor/github.com/hugelgupf/p9/vecnet/vecnet.go new file mode 100644 index 000000000..042078d54 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/vecnet/vecnet.go @@ -0,0 +1,54 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package vecnet provides access to recvmsg syscalls on net.Conns. +package vecnet + +import ( + "io" + "net" + "syscall" +) + +// Buffers points to zero or more buffers to read into. +// +// On connections that support it, ReadFrom is optimized into the batch read +// operation recvmsg. +type Buffers net.Buffers + +// ReadFrom reads into the pre-allocated bufs. Returns bytes read. +// +// ReadFrom keeps reading until all bufs are filled or EOF is received. +// +// The pre-allocatted space used by ReadFrom is based upon slice lengths. +func (bufs Buffers) ReadFrom(r io.Reader) (int64, error) { + if conn, ok := r.(syscall.Conn); ok && readFromBuffers != nil { + return readFromBuffers(bufs, conn) + } + + var total int64 + for _, buf := range bufs { + for filled := 0; filled < len(buf); { + n, err := r.Read(buf[filled:]) + total += int64(n) + filled += n + if (n == 0 && err == nil) || err == io.EOF { + return total, io.EOF + } else if err != nil { + return total, err + } + } + } + return total, nil +} diff --git a/vendor/github.com/hugelgupf/p9/vecnet/vecnet_linux.go b/vendor/github.com/hugelgupf/p9/vecnet/vecnet_linux.go new file mode 100644 index 000000000..0c351f3fc --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/vecnet/vecnet_linux.go @@ -0,0 +1,112 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !386 +// +build !386 + +package vecnet + +import ( + "io" + "runtime" + "syscall" + "unsafe" +) + +var readFromBuffers = readFromBuffersLinux + +func readFromBuffersLinux(bufs Buffers, conn syscall.Conn) (int64, error) { + rc, err := conn.SyscallConn() + if err != nil { + return 0, err + } + + length := int64(0) + for _, buf := range bufs { + length += int64(len(buf)) + } + + for n := int64(0); n < length; { + cur, err := recvmsg(bufs, rc) + if err != nil && (cur == 0 || err != io.EOF) { + return n, err + } + n += int64(cur) + + // Consume iovecs to retry. + for consumed := 0; consumed < cur; { + if len(bufs[0]) <= cur-consumed { + consumed += len(bufs[0]) + bufs = bufs[1:] + } else { + bufs[0] = bufs[0][cur-consumed:] + break + } + } + } + return length, nil +} + +// buildIovec builds an iovec slice from the given []byte slice. +// +// iovecs is used as an initial slice, to avoid excessive allocations. 
+func buildIovec(bufs Buffers, iovecs []syscall.Iovec) ([]syscall.Iovec, int) { + var length int + for _, buf := range bufs { + if l := len(buf); l > 0 { + iovecs = append(iovecs, syscall.Iovec{ + Base: &buf[0], + Len: iovlen(l), + }) + length += l + } + } + return iovecs, length +} + +func recvmsg(bufs Buffers, rc syscall.RawConn) (int, error) { + iovecs, length := buildIovec(bufs, make([]syscall.Iovec, 0, 2)) + + var msg syscall.Msghdr + if len(iovecs) != 0 { + msg.Iov = &iovecs[0] + msg.Iovlen = iovlen(len(iovecs)) + } + + // n is the bytes received. + var n uintptr + var e syscall.Errno + err := rc.Read(func(fd uintptr) bool { + n, _, e = syscall.Syscall(syscall.SYS_RECVMSG, fd, uintptr(unsafe.Pointer(&msg)), syscall.MSG_DONTWAIT) + // Return false if EINTR, EAGAIN, or EWOULDBLOCK to retry. + return !(e == syscall.EINTR || e == syscall.EAGAIN || e == syscall.EWOULDBLOCK) + }) + runtime.KeepAlive(iovecs) + if err != nil { + return 0, err + } + if e != 0 { + return 0, e + } + + // The other end is closed by returning a 0 length read with no error. + if n == 0 { + return 0, io.EOF + } + + if int(n) > length { + return length, io.ErrShortBuffer + } + return int(n), nil +} diff --git a/vendor/github.com/hugelgupf/p9/vecnet/vecnet_other.go b/vendor/github.com/hugelgupf/p9/vecnet/vecnet_other.go new file mode 100644 index 000000000..c596450c6 --- /dev/null +++ b/vendor/github.com/hugelgupf/p9/vecnet/vecnet_other.go @@ -0,0 +1,24 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !linux || (linux && 386) +// +build !linux linux,386 + +package vecnet + +import ( + "syscall" +) + +var readFromBuffers func(bufs Buffers, conn syscall.Conn) (int64, error) diff --git a/vendor/github.com/insomniacslk/dhcp/dhcpv4/dhcpv4.go b/vendor/github.com/insomniacslk/dhcp/dhcpv4/dhcpv4.go index b894fbbcb..6043cd956 100644 --- a/vendor/github.com/insomniacslk/dhcp/dhcpv4/dhcpv4.go +++ b/vendor/github.com/insomniacslk/dhcp/dhcpv4/dhcpv4.go @@ -222,8 +222,7 @@ func PrependModifiers(m []Modifier, other ...Modifier) []Modifier { // NewInform builds a new DHCPv4 Informational message with the specified // hardware address. func NewInform(hwaddr net.HardwareAddr, localIP net.IP, modifiers ...Modifier) (*DHCPv4, error) { - return New(PrependModifiers( - modifiers, + return New(PrependModifiers(modifiers, WithHwAddr(hwaddr), WithMessageType(MessageTypeInform), WithClientIP(localIP), @@ -231,6 +230,7 @@ func NewInform(hwaddr net.HardwareAddr, localIP net.IP, modifiers ...Modifier) ( } // NewRequestFromOffer builds a DHCPv4 request from an offer. +// It assumes the SELECTING state by default, see Section 4.3.2 in RFC 2131 for more details. func NewRequestFromOffer(offer *DHCPv4, modifiers ...Modifier) (*DHCPv4, error) { return New(PrependModifiers(modifiers, WithReply(offer), @@ -248,6 +248,25 @@ func NewRequestFromOffer(offer *DHCPv4, modifiers ...Modifier) (*DHCPv4, error) )...) } +// NewRenewFromAck builds a DHCPv4 RENEW-style request from the ACK of a lease. RENEW requests have +// minor changes to their options compared to SELECT requests as specified by RFC 2131, section 4.3.2. 
+func NewRenewFromAck(ack *DHCPv4, modifiers ...Modifier) (*DHCPv4, error) { + return New(PrependModifiers(modifiers, + WithReply(ack), + WithMessageType(MessageTypeRequest), + // The client IP must be filled in with the IP offered to the client + WithClientIP(ack.YourIPAddr), + // The renewal request must use unicast + WithBroadcast(false), + WithRequestedOptions( + OptionSubnetMask, + OptionRouter, + OptionDomainName, + OptionDomainNameServer, + ), + )...) +} + // NewReplyFromRequest builds a DHCPv4 reply from a request. func NewReplyFromRequest(request *DHCPv4, modifiers ...Modifier) (*DHCPv4, error) { return New(PrependModifiers(modifiers, @@ -382,6 +401,13 @@ func (d *DHCPv4) GetOneOption(code OptionCode) []byte { return d.Options.Get(code) } +// DeleteOption deletes an existing option with the given option code. +func (d *DHCPv4) DeleteOption(code OptionCode) { + if d.Options != nil { + d.Options.Del(code) + } +} + // UpdateOption replaces an existing option with the same option code with the // given one, adding it if not already present. func (d *DHCPv4) UpdateOption(opt Option) { diff --git a/vendor/github.com/insomniacslk/dhcp/dhcpv4/fuzz.go b/vendor/github.com/insomniacslk/dhcp/dhcpv4/fuzz.go deleted file mode 100644 index cf62ba588..000000000 --- a/vendor/github.com/insomniacslk/dhcp/dhcpv4/fuzz.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build gofuzz - -package dhcpv4 - -import ( - "fmt" - "reflect" -) - -// Fuzz is the entrypoint for go-fuzz -func Fuzz(data []byte) int { - msg, err := FromBytes(data) - if err != nil { - return 0 - } - - serialized := msg.ToBytes() - - // Compared to dhcpv6, dhcpv4 has padding and fixed-size fields containing - // variable-length data; We can't expect the library to output byte-for-byte - // identical packets after a round-trip. 
- // Instead, we check that after a round-trip, the packet reserializes to the - // same internal representation - rtMsg, err := FromBytes(serialized) - - if err != nil || !reflect.DeepEqual(msg, rtMsg) { - fmt.Printf("Input: %x\n", data) - fmt.Printf("Round-trip: %x\n", serialized) - fmt.Println("Message: ", msg.Summary()) - fmt.Printf("Go repr: %#v\n", msg) - fmt.Println("Reserialized: ", rtMsg.Summary()) - fmt.Printf("Go repr: %#v\n", rtMsg) - if err != nil { - fmt.Printf("Got error while reserializing: %v\n", err) - panic("round-trip error: " + err.Error()) - } - panic("round-trip different: " + msg.Summary()) - } - - return 1 -} diff --git a/vendor/github.com/insomniacslk/dhcp/dhcpv4/modifiers.go b/vendor/github.com/insomniacslk/dhcp/dhcpv4/modifiers.go index 0ab35bc56..68da298ce 100644 --- a/vendor/github.com/insomniacslk/dhcp/dhcpv4/modifiers.go +++ b/vendor/github.com/insomniacslk/dhcp/dhcpv4/modifiers.go @@ -99,6 +99,13 @@ func WithOption(opt Option) Modifier { } } +// WithoutOption removes the DHCPv4 option with the given code +func WithoutOption(code OptionCode) Modifier { + return func(d *DHCPv4) { + d.DeleteOption(code) + } +} + // WithUserClass adds a user class option to the packet. // The rfc parameter allows you to specify if the userclass should be // rfc compliant or not. More details in issue #113 diff --git a/vendor/github.com/insomniacslk/dhcp/dhcpv4/options.go b/vendor/github.com/insomniacslk/dhcp/dhcpv4/options.go index fdc79aebf..9d404b43a 100644 --- a/vendor/github.com/insomniacslk/dhcp/dhcpv4/options.go +++ b/vendor/github.com/insomniacslk/dhcp/dhcpv4/options.go @@ -81,6 +81,11 @@ func (o Options) Has(opcode OptionCode) bool { return ok } +// Del deletes the option matching the option code. 
+func (o Options) Del(opcode OptionCode) { + delete(o, opcode.Code()) +} + // Update updates the existing options with the passed option, adding it // at the end if not present already func (o Options) Update(option Option) { diff --git a/vendor/github.com/insomniacslk/dhcp/dhcpv4/server4/conn_unix.go b/vendor/github.com/insomniacslk/dhcp/dhcpv4/server4/conn_unix.go index da62398d4..18dd98669 100644 --- a/vendor/github.com/insomniacslk/dhcp/dhcpv4/server4/conn_unix.go +++ b/vendor/github.com/insomniacslk/dhcp/dhcpv4/server4/conn_unix.go @@ -33,6 +33,10 @@ func NewIPv4UDPConn(iface string, addr *net.UDPAddr) (*net.UDPConn, error) { if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEADDR, 1); err != nil { return nil, fmt.Errorf("cannot set reuseaddr on socket: %v", err) } + // Allow reusing the port to aid debugging and testing. + if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { + return nil, fmt.Errorf("cannot set reuseport on socket: %v", err) + } if len(iface) != 0 { // Bind directly to the interface. 
if err := dhcpv4.BindToInterface(fd, iface); err != nil { diff --git a/vendor/github.com/insomniacslk/dhcp/iana/entid.go b/vendor/github.com/insomniacslk/dhcp/iana/entid.go index 8703b79cd..6aa318c69 100644 --- a/vendor/github.com/insomniacslk/dhcp/iana/entid.go +++ b/vendor/github.com/insomniacslk/dhcp/iana/entid.go @@ -5,13 +5,15 @@ type EnterpriseID int // See https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers for values const ( - EnterpriseIDCiscoSystems EnterpriseID = 9 - EnterpriseIDCienaCorporation EnterpriseID = 1271 + EnterpriseIDCiscoSystems EnterpriseID = 9 + EnterpriseIDCienaCorporation EnterpriseID = 1271 + EnterpriseIDMellanoxTechnologiesLTD EnterpriseID = 33049 ) var enterpriseIDToStringMap = map[EnterpriseID]string{ - EnterpriseIDCiscoSystems: "Cisco Systems", - EnterpriseIDCienaCorporation: "Ciena Corporation", + EnterpriseIDCiscoSystems: "Cisco Systems", + EnterpriseIDCienaCorporation: "Ciena Corporation", + EnterpriseIDMellanoxTechnologiesLTD: "Mellanox Technologies LTD", } // String returns the vendor name for a given Enterprise ID diff --git a/vendor/github.com/insomniacslk/dhcp/rfc1035label/label.go b/vendor/github.com/insomniacslk/dhcp/rfc1035label/label.go index 5a67d7c94..f727ec6eb 100644 --- a/vendor/github.com/insomniacslk/dhcp/rfc1035label/label.go +++ b/vendor/github.com/insomniacslk/dhcp/rfc1035label/label.go @@ -89,6 +89,10 @@ func FromBytes(data []byte) (*Labels, error) { return &l, nil } +// ErrBufferTooShort is returned when the label cannot be parsed due to a wrong +// length or missing bytes. 
+var ErrBufferTooShort = errors.New("rfc1035label: buffer too short") + // fromBytes decodes a serialized stream and returns a list of labels func labelsFromBytes(buf []byte) ([]string, error) { var ( @@ -100,6 +104,12 @@ func labelsFromBytes(buf []byte) ([]string, error) { for { if pos >= len(buf) { + // interpret label without trailing zero-length byte as a partial + // domain name field as per RFC 4704 Section 4.2 + if label != "" { + labels = append(labels, label) + } + break } length := int(buf[pos]) @@ -126,7 +136,7 @@ func labelsFromBytes(buf []byte) ([]string, error) { pos = off } else { if pos+length > len(buf) { - return nil, errors.New("rfc1035label: buffer too short") + return nil, ErrBufferTooShort } chunk = string(buf[pos : pos+length]) if label != "" { diff --git a/vendor/github.com/josharian/native/doc.go b/vendor/github.com/josharian/native/doc.go new file mode 100644 index 000000000..2ca7ddc8a --- /dev/null +++ b/vendor/github.com/josharian/native/doc.go @@ -0,0 +1,8 @@ +// Package native provides easy access to native byte order. +// +// Usage: use native.Endian where you need the native binary.ByteOrder. +// +// Please think twice before using this package. +// It can break program portability. +// Native byte order is usually not the right answer. +package native diff --git a/vendor/github.com/josharian/native/endian_big.go b/vendor/github.com/josharian/native/endian_big.go new file mode 100644 index 000000000..77744fdd4 --- /dev/null +++ b/vendor/github.com/josharian/native/endian_big.go @@ -0,0 +1,14 @@ +//go:build mips || mips64 || ppc64 || s390x +// +build mips mips64 ppc64 s390x + +package native + +import "encoding/binary" + +// Endian is the encoding/binary.ByteOrder implementation for the +// current CPU's native byte order. +var Endian = binary.BigEndian + +// IsBigEndian is whether the current CPU's native byte order is big +// endian. 
+const IsBigEndian = true diff --git a/vendor/github.com/josharian/native/endian_generic.go b/vendor/github.com/josharian/native/endian_generic.go new file mode 100644 index 000000000..c15228f31 --- /dev/null +++ b/vendor/github.com/josharian/native/endian_generic.go @@ -0,0 +1,31 @@ +//go:build !mips && !mips64 && !ppc64 && !s390x && !amd64 && !386 && !arm && !arm64 && !loong64 && !mipsle && !mips64le && !ppc64le && !riscv64 && !wasm +// +build !mips,!mips64,!ppc64,!s390x,!amd64,!386,!arm,!arm64,!loong64,!mipsle,!mips64le,!ppc64le,!riscv64,!wasm + +// This file is a fallback, so that package native doesn't break +// the instant the Go project adds support for a new architecture. +// + +package native + +import ( + "encoding/binary" + "log" + "runtime" + "unsafe" +) + +var Endian binary.ByteOrder + +var IsBigEndian bool + +func init() { + b := uint16(0xff) // one byte + if *(*byte)(unsafe.Pointer(&b)) == 0 { + Endian = binary.BigEndian + IsBigEndian = true + } else { + Endian = binary.LittleEndian + IsBigEndian = false + } + log.Printf("github.com/josharian/native: unrecognized arch %v (%v), please file an issue", runtime.GOARCH, Endian) +} diff --git a/vendor/github.com/josharian/native/endian_little.go b/vendor/github.com/josharian/native/endian_little.go new file mode 100644 index 000000000..5098fec26 --- /dev/null +++ b/vendor/github.com/josharian/native/endian_little.go @@ -0,0 +1,14 @@ +//go:build amd64 || 386 || arm || arm64 || loong64 || mipsle || mips64le || ppc64le || riscv64 || wasm +// +build amd64 386 arm arm64 loong64 mipsle mips64le ppc64le riscv64 wasm + +package native + +import "encoding/binary" + +// Endian is the encoding/binary.ByteOrder implementation for the +// current CPU's native byte order. +var Endian = binary.LittleEndian + +// IsBigEndian is whether the current CPU's native byte order is big +// endian. 
+const IsBigEndian = false diff --git a/vendor/github.com/josharian/native/license b/vendor/github.com/josharian/native/license new file mode 100644 index 000000000..6e617a9c7 --- /dev/null +++ b/vendor/github.com/josharian/native/license @@ -0,0 +1,7 @@ +Copyright 2020 Josh Bleecher Snyder + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/josharian/native/readme.md b/vendor/github.com/josharian/native/readme.md new file mode 100644 index 000000000..1fc5a01b8 --- /dev/null +++ b/vendor/github.com/josharian/native/readme.md @@ -0,0 +1,10 @@ +Package native provides easy access to native byte order. + +`go get github.com/josharian/native` + +Usage: Use `native.Endian` where you need the native binary.ByteOrder. + +Please think twice before using this package. +It can break program portability. +Native byte order is usually not the right answer. 
+ diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore new file mode 100644 index 000000000..5d7e88de0 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/.gitignore @@ -0,0 +1,36 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea + +fuzz/*.zip diff --git a/vendor/github.com/pierrec/lz4/v4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE new file mode 100644 index 000000000..bd899d835 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md new file mode 100644 index 000000000..4629c9d0e --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -0,0 +1,92 @@ +# lz4 : LZ4 compression in pure Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4) +[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) + +## Overview + +This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. +The implementation is based on the reference C [one](https://github.com/lz4/lz4). + +## Install + +Assuming you have the go toolchain installed: + +``` +go get github.com/pierrec/lz4/v4 +``` + +There is a command line interface tool to compress and decompress LZ4 files. + +``` +go install github.com/pierrec/lz4/v4/cmd/lz4c +``` + +Usage + +``` +Usage of lz4c: + -version + print the program version + +Subcommands: +Compress the given files or from stdin to stdout. 
+compress [arguments] [ ...] + -bc + enable block checksum + -l int + compression level (0=fastest) + -sc + disable stream checksum + -size string + block max size [64K,256K,1M,4M] (default "4M") + +Uncompress the given files or from stdin to stdout. +uncompress [arguments] [ ...] + +``` + + +## Example + +``` +// Compress and uncompress an input string. +s := "hello world" +r := strings.NewReader(s) + +// The pipe will uncompress the data from the writer. +pr, pw := io.Pipe() +zw := lz4.NewWriter(pw) +zr := lz4.NewReader(pr) + +go func() { + // Compress the input string. + _, _ = io.Copy(zw, r) + _ = zw.Close() // Make sure the writer is closed + _ = pw.Close() // Terminate the pipe +}() + +_, _ = io.Copy(os.Stdout, zr) + +// Output: +// hello world +``` + +## Contributing + +Contributions are very welcome for bug fixing, performance improvements...! + +- Open an issue with a proper description +- Send a pull request with appropriate test case(s) + +## Contributors + +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! + +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. + +Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64. + +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go new file mode 100644 index 000000000..fec8adb03 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go @@ -0,0 +1,481 @@ +package lz4block + +import ( + "encoding/binary" + "math/bits" + "sync" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +const ( + // The following constants are used to setup the compression algorithm. 
+ minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + *e = lz4errors.ErrInvalidSourceShortBuffer + } +} + +// blockHash hashes the lower 6 bytes into a value < htSize. +func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) +} + +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +func UncompressBlock(src, dst, dict []byte) (int, error) { + if len(src) == 0 { + return 0, nil + } + if di := decodeBlock(dst, src, dict); di >= 0 { + return di, nil + } + return 0, lz4errors.ErrInvalidSourceShortBuffer +} + +type Compressor struct { + // Offsets are at most 64kiB, so we can store only the lower 16 bits of + // match positions: effectively, an offset from some 64kiB block boundary. + // + // When we retrieve such an offset, we interpret it as relative to the last + // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000, + // depending on which of these is inside the current window. If a table + // entry was generated more than 64kiB back in the input, we find out by + // inspecting the input stream. + table [htSize]uint16 + + // Bitmap indicating which positions in the table are in use. + // This allows us to quickly reset the table for reuse, + // without having to zero everything. 
+ inUse [htSize / 32]uint32 +} + +// Get returns the position of a presumptive match for the hash h. +// The match may be a false positive due to a hash collision or an old entry. +// If si < winSize, the return value may be negative. +func (c *Compressor) get(h uint32, si int) int { + h &= htSize - 1 + i := 0 + if c.inUse[h/32]&(1<<(h%32)) != 0 { + i = int(c.table[h]) + } + i += si &^ winMask + if i >= si { + // Try previous 64kiB block (negative when in first block). + i -= winSize + } + return i +} + +func (c *Compressor) put(h uint32, si int) { + h &= htSize - 1 + c.table[h] = uint16(si) + c.inUse[h/32] |= 1 << (h % 32) +} + +func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} } + +var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }} + +func CompressBlock(src, dst []byte) (int, error) { + c := compressorPool.Get().(*Compressor) + n, err := c.CompressBlock(src, dst) + compressorPool.Put(c) + return n, err +} + +func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.reset() + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... 
+ match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. + ref := c.get(h, si) + ref2 := c.get(h2, si+1) + c.put(h, si) + c.put(h2, si+1) + + offset := si - ref + + if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { + // No match. Start calculating another hash. + // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref3 := c.get(h, si+2) + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref3 + c.put(h, si) + + if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } + } + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes. + mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ + } + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 <= sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } + } + + mLen = si - mLen + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. 
+ if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF && di < len(dst); l -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(l) + } + di++ + + // Literals. + if di+lLen > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + if di > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(mLen) + di++ + } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + c.put(h, si-2) + } + +lastLiterals: + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + if di+len(src)-anchor > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. 
+ return x * hasher >> (32 - winSizeLog) +} + +type CompressorHC struct { + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + hashTable, chainTable [htSize]int + needsReset bool +} + +var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }} + +func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) { + c := compressorHCPool.Get().(*CompressorHC) + n, err := c.CompressBlock(src, dst, depth) + compressorHCPool.Put(c) + return n, err +} + +func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) { + if c.needsReset { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.hashTable = [htSize]int{} + c.chainTable = [htSize]int{} + } + c.needsReset = true // Only false on first call. + + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + if depth == 0 { + depth = winSize + } + + for si < sn { + // Hash the next 4 bytes (sequence). + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHashHC(match) + + // Follow the chain until out of window and give the longest match. + mLen := 0 + offset := 0 + for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 { + // The first (mLen==0) or next byte (mLen>=minMatch) at current match length + // must match to improve on the match length. 
+ if src[next+mLen] != src[si+mLen] { + continue + } + ml := 0 + // Compare the current position with a previous with the same hash. + for ml < sn-si { + x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) + if x == 0 { + ml += 8 + } else { + // Stop is first non-zero byte. + ml += bits.TrailingZeros64(x) >> 3 + break + } + } + if ml < minMatch || ml <= mLen { + // Match too small (>adaptSkipLog + continue + } + + // Match found. + // Update hash/chain tables with overlapping bytes: + // si already hashed, add everything from si+1 up to the match length. + winStart := si + 1 + if ws := si + mLen - winSize; ws > winStart { + winStart = ws + } + for si, ml := winStart, si+mLen; si < ml; { + match >>= 8 + match |= uint32(src[si+3]) << 24 + h := blockHashHC(match) + c.chainTable[si&winMask] = c.hashTable[h] + c.hashTable[h] = si + si++ + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // Match length does not include minMatch. + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + anchor = si + + // Encode offset. + di += 2 + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + } + + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. 
+lastLiterals: + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go new file mode 100644 index 000000000..a1bfa99e4 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -0,0 +1,90 @@ +// Package lz4block provides LZ4 BlockSize types and pools of buffers. +package lz4block + +import "sync" + +const ( + Block64Kb uint32 = 1 << (16 + iota*2) + Block256Kb + Block1Mb + Block4Mb +) + +// In legacy mode all blocks are compressed regardless +// of the compressed size: use the bound size. 
+var Block8Mb = uint32(CompressBlockBound(8 << 20)) + +var ( + BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} + BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} + BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }} + BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }} + BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }} +) + +func Index(b uint32) BlockSizeIndex { + switch b { + case Block64Kb: + return 4 + case Block256Kb: + return 5 + case Block1Mb: + return 6 + case Block4Mb: + return 7 + case Block8Mb: // only valid in legacy mode + return 3 + } + return 0 +} + +func IsValid(b uint32) bool { + return Index(b) > 0 +} + +type BlockSizeIndex uint8 + +func (b BlockSizeIndex) IsValid() bool { + switch b { + case 4, 5, 6, 7: + return true + } + return false +} + +func (b BlockSizeIndex) Get() []byte { + var buf interface{} + switch b { + case 4: + buf = BlockPool64K.Get() + case 5: + buf = BlockPool256K.Get() + case 6: + buf = BlockPool1M.Get() + case 7: + buf = BlockPool4M.Get() + case 3: + buf = BlockPool8M.Get() + } + return buf.([]byte) +} + +func Put(buf []byte) { + // Safeguard: do not allow invalid buffers. 
+ switch c := cap(buf); uint32(c) { + case Block64Kb: + BlockPool64K.Put(buf[:c]) + case Block256Kb: + BlockPool256K.Put(buf[:c]) + case Block1Mb: + BlockPool1M.Put(buf[:c]) + case Block4Mb: + BlockPool4M.Put(buf[:c]) + case Block8Mb: + BlockPool8M.Put(buf[:c]) + } +} + +type CompressionLevel uint32 + +const Fast CompressionLevel = 0 diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s new file mode 100644 index 000000000..1d00133fa --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s @@ -0,0 +1,448 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// AX scratch +// BX scratch +// CX literal and match lengths +// DX token, match offset +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// R14 &dict +// R15 len(dict) + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOSPLIT, $48-80 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + CMPQ R9, $0 + JE err_corrupt + ADDQ SI, R9 + + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + + XORL CX, CX + +loop: + // token := uint32(src[si]) + MOVBLZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVL DX, CX + SHRL $4, CX + + // if lit_len != 0xF + CMPL CX, $0xF + JEQ lit_len_loop + CMPQ DI, R12 + JAE copy_literal + CMPQ SI, R13 + JAE copy_literal + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 
bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVL DX, CX + ANDL $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. + // offset := uint16(data[:2]) + MOVWLZX (SI), DX + TESTL DX, DX + JE err_corrupt + ADDQ $2, SI + JC err_short_buf + + MOVQ DI, AX + SUBQ DX, AX + JC err_corrupt + CMPQ AX, DI + JA err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. + CMPL CX, $0xF + JEQ match_len_loop_pre + CMPL DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JB match_len_loop_pre + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + LEAQ const_minMatch(DI)(CX*1), DI + + // shortcut complete, load next token + JMP loopcheck + + // Read the rest of the literal length: + // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF). +lit_len_loop: + CMPQ SI, R9 + JAE err_short_buf + + MOVBLZX (SI), BX + INCQ SI + ADDQ BX, CX + + CMPB BX, $0xFF + JE lit_len_loop + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + JC err_short_buf + CMPQ AX, R9 + JA err_short_buf + + MOVQ DI, BX + ADDQ CX, BX + JC err_short_buf + CMPQ BX, R8 + JA err_short_buf + + // Copy literals of <=48 bytes through the XMM registers. 
+ CMPQ CX, $48 + JGT memmove_lit + + // if len(dst[di:]) < 48 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $48 + JLT memmove_lit + + // if len(src[si:]) < 48 + MOVQ R9, BX + SUBQ SI, BX + CMPQ BX, $48 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + + ADDQ CX, SI + ADDQ CX, DI + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + + // Spill registers. Increment SI, DI now so we don't need to save CX. + ADDQ CX, DI + ADDQ CX, SI + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVL DX, 40(SP) + + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVL 40(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + // CX := mLen + // free up DX to use for offset + MOVL DX, CX + ANDL $0xF, CX + + CMPQ SI, R9 + JAE end + + // offset + // si += 2 + // DX := int(src[si-2]) | int(src[si-1])<<8 + ADDQ $2, SI + JC err_short_buf + CMPQ SI, R9 + JA err_short_buf + MOVWQZX -2(SI), DX + + // 0 offset is invalid + TESTL DX, DX + JEQ err_corrupt + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + + // do { BX = src[si++]; mlen += BX } while (BX == 0xFF). 
+match_len_loop: + CMPQ SI, R9 + JAE err_short_buf + + MOVBLZX (SI), BX + INCQ SI + ADDQ BX, CX + + CMPB BX, $0xFF + JE match_len_loop + +copy_match: + ADDQ $const_minMatch, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + JC err_short_buf + CMPQ AX, R8 + JA err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + JC copy_match_from_dict + CMPQ BX, R11 + JBE copy_match_from_dict + + // if offset + match_len < di + LEAQ (BX)(CX*1), AX + CMPQ DI, AX + JA copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + JNZ copy_match_loop + + JMP loopcheck + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + XORL CX, CX + JMP loopcheck + +copy_match_from_dict: + // CX = match_len + // BX = &dst + (di - offset) + + // AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary + MOVQ R11, AX + SUBQ BX, AX + + // BX = len(dict) - dict_bytes_available + MOVQ R15, BX + SUBQ AX, BX + JS err_short_dict + + ADDQ R14, BX + + // if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy + CMPQ CX, AX + JLT memmove_match + + // The match stretches over the dictionary and our block + // 1) copy what comes from the dictionary + // AX = dict_bytes_available = copy_size + // BX = &dict_end - copy_size + // CX = match_len + + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ AX, 16(SP) + // store extra stuff 
we want to recover + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 16(SP), AX // copy_size + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX // match_len + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + // di+=copy_size + ADDQ AX, DI + + // 2) copy the rest from the current block + // CX = match_len - copy_size = rest_size + SUBQ AX, CX + MOVQ R11, BX + + // check if we have a copy overlap + // AX = &dst + rest_size + MOVQ CX, AX + ADDQ BX, AX + // if &dst + rest_size > di, copy byte by byte + CMPQ AX, DI + + JA copy_match_loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + + // Spill registers. Increment DI now so we don't need to save CX. + ADDQ CX, DI + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + XORL CX, CX + +loopcheck: + // for si < len(src) + CMPQ SI, R9 + JB loop + +end: + // Remaining length must be zero. 
+ TESTQ CX, CX + JNE err_corrupt + + SUBQ R11, DI + MOVQ DI, ret+72(FP) + RET + +err_corrupt: + MOVQ $-1, ret+72(FP) + RET + +err_short_buf: + MOVQ $-2, ret+72(FP) + RET + +err_short_dict: + MOVQ $-3, ret+72(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s new file mode 100644 index 000000000..20b21fcf1 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s @@ -0,0 +1,231 @@ +// +build gc +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define srcend R4 +#define match R5 // Match address. +#define dictend R6 +#define token R7 +#define len R8 // Literal and match lengths. +#define offset R7 // Match offset; overlaps with token. +#define tmp1 R9 +#define tmp2 R11 +#define tmp3 R12 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40 + MOVW dst_base +0(FP), dst + MOVW dst_len +4(FP), dstend + MOVW src_base +12(FP), src + MOVW src_len +16(FP), srcend + + CMP $0, srcend + BEQ shortSrc + + ADD dst, dstend + ADD src, srcend + + MOVW dst, dstorig + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + MOVW token >> 4, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CMP $0, len + BEQ copyLiteralDone + + // Bounds check dst+len and src+len. + ADD.S dst, len, tmp1 + ADD.CC.S src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + //BHI shortDst // Uncomment for distinct error codes. + CMP.LS srcend, tmp2 + BHI shortSrc + + // Copy literal. + CMP $4, len + BLO copyLiteralFinish + + // Copy 0-3 bytes until src is aligned. 
+ TST $1, src + MOVBU.NE.P 1(src), tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $1, len + + TST $2, src + MOVHU.NE.P 2(src), tmp2 + MOVB.NE.P tmp2, 1(dst) + MOVW.NE tmp2 >> 8, tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $2, len + + B copyLiteralLoopCond + +copyLiteralLoop: + // Aligned load, unaligned write. + MOVW.P 4(src), tmp1 + MOVW tmp1 >> 8, tmp2 + MOVB tmp2, 1(dst) + MOVW tmp1 >> 16, tmp3 + MOVB tmp3, 2(dst) + MOVW tmp1 >> 24, tmp2 + MOVB tmp2, 3(dst) + MOVB.P tmp1, 4(dst) +copyLiteralLoopCond: + // Loop until len-4 < 0. + SUB.S $4, len + BPL copyLiteralLoop + +copyLiteralFinish: + // Copy remaining 0-3 bytes. + // At this point, len may be < 0, but len&3 is still accurate. + TST $1, len + MOVB.NE.P 1(src), tmp3 + MOVB.NE.P tmp3, 1(dst) + TST $2, len + MOVB.NE.P 2(src), tmp1 + MOVB.NE.P tmp1, 2(dst) + MOVB.NE -1(src), tmp2 + MOVB.NE tmp2, -1(dst) + +copyLiteralDone: + // Initial part of match length. + // This frees up the token register for reuse as offset. + AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. + ADD.S $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVBU -2(src), offset + MOVBU -1(src), tmp1 + ORR.S tmp1 << 8, offset + BEQ corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + // Bounds check dst+len+minMatch. + ADD.S dst, len, tmp1 + ADD.CC.S $const_minMatch, tmp1 + BCS shortDst + CMP dstend, tmp1 + BHI shortDst + + RSB dst, offset, match + CMP dstorig, match + BGE copyMatch4 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). 
+ MOVW dict_base+24(FP), match + MOVW dict_len +28(FP), dictend + + ADD $const_minMatch, len + + RSB dst, dstorig, tmp1 + RSB dictend, offset, tmp2 + ADD.S tmp2, tmp1 + BMI shortDict + ADD match, dictend + ADD tmp1, match + +copyDict: + MOVBU.P 1(match), tmp1 + MOVB.P tmp1, 1(dst) + SUB.S $1, len + CMP.NE match, dictend + BNE copyDict + + // If the match extends beyond the dictionary, the rest is at dstorig. + CMP $0, len + BEQ copyMatchDone + MOVW dstorig, match + B copyMatch + + // Copy a regular match. + // Since len+minMatch is at least four, we can do a 4× unrolled + // byte copy loop. Using MOVW instead of four byte loads is faster, + // but to remain portable we'd have to align match first, which is + // too expensive. By alternating loads and stores, we also handle + // the case offset < 4. +copyMatch4: + SUB.S $4, len + MOVBU.P 4(match), tmp1 + MOVB.P tmp1, 4(dst) + MOVBU -3(match), tmp2 + MOVB tmp2, -3(dst) + MOVBU -2(match), tmp3 + MOVB tmp3, -2(dst) + MOVBU -1(match), tmp1 + MOVB tmp1, -1(dst) + BPL copyMatch4 + + // Restore len, which is now negative. + ADD.S $4, len + BEQ copyMatchDone + +copyMatch: + // Finish with a byte-at-a-time copy. + SUB.S $1, len + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + BNE copyMatch + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CMP $0, len + BNE corrupt + SUB dstorig, dst, tmp1 + MOVW tmp1, ret+36(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. 
+shortDict: +shortDst: +shortSrc: +corrupt: + MOVW $-1, tmp1 + MOVW tmp1, ret+36(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s new file mode 100644 index 000000000..d2fe11b8e --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -0,0 +1,241 @@ +// +build gc +// +build !noasm + +// This implementation assumes that strict alignment checking is turned off. +// The Go compiler makes the same assumption. + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define dstend16 R4 // dstend - 16 +#define srcend R5 +#define srcend16 R6 // srcend - 16 +#define match R7 // Match address. +#define dict R8 +#define dictlen R9 +#define dictend R10 +#define token R11 +#define len R12 // Literal and match lengths. +#define lenRem R13 +#define offset R14 // Match offset. +#define tmp1 R15 +#define tmp2 R16 +#define tmp3 R17 +#define tmp4 R19 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80 + LDP dst_base+0(FP), (dst, dstend) + ADD dst, dstend + MOVD dst, dstorig + + LDP src_base+24(FP), (src, srcend) + CBZ srcend, shortSrc + ADD src, srcend + + // dstend16 = max(dstend-16, 0) and similarly for srcend16. + SUBS $16, dstend, dstend16 + CSEL LO, ZR, dstend16, dstend16 + SUBS $16, srcend, srcend16 + CSEL LO, ZR, srcend16, srcend16 + + LDP dict_base+48(FP), (dict, dictlen) + ADD dict, dictlen, dictend + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + LSR $4, token, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CBZ len, copyLiteralDone + + // Bounds check dst+len and src+len. 
+ ADDS dst, len, tmp1 + BCS shortSrc + ADDS src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + BHI shortDst + CMP srcend, tmp2 + BHI shortSrc + + // Copy literal. + SUBS $16, len + BLO copyLiteralShort + +copyLiteralLoop: + LDP.P 16(src), (tmp1, tmp2) + STP.P (tmp1, tmp2), 16(dst) + SUBS $16, len + BPL copyLiteralLoop + + // Copy (final part of) literal of length 0-15. + // If we have >=16 bytes left in src and dst, just copy 16 bytes. +copyLiteralShort: + CMP dstend16, dst + CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO). + BHS copyLiteralShortEnd + + AND $15, len + + LDP (src), (tmp1, tmp2) + ADD len, src + STP (tmp1, tmp2), (dst) + ADD len, dst + + B copyLiteralDone + + // Safe but slow copy near the end of src, dst. +copyLiteralShortEnd: + TBZ $3, len, 3(PC) + MOVD.P 8(src), tmp1 + MOVD.P tmp1, 8(dst) + TBZ $2, len, 3(PC) + MOVW.P 4(src), tmp2 + MOVW.P tmp2, 4(dst) + TBZ $1, len, 3(PC) + MOVH.P 2(src), tmp3 + MOVH.P tmp3, 2(dst) + TBZ $0, len, 3(PC) + MOVBU.P 1(src), tmp4 + MOVB.P tmp4, 1(dst) + +copyLiteralDone: + // Initial part of match length. + AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. + ADDS $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVHU -2(src), offset + CBZ offset, corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + ADD $const_minMatch, len + + // Bounds check dst+len. + ADDS dst, len, tmp2 + BCS shortDst + CMP dstend, tmp2 + BHI shortDst + + SUB offset, dst, match + CMP dstorig, match + BHS copyMatchTry8 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). 
+ SUB dstorig, dst, tmp1 + SUB offset, dictlen, tmp2 + ADDS tmp2, tmp1 + BMI shortDict + ADD dict, tmp1, match + +copyDict: + MOVBU.P 1(match), tmp3 + MOVB.P tmp3, 1(dst) + SUBS $1, len + CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag. + BNE copyDict + + CBZ len, copyMatchDone + + // If the match extends beyond the dictionary, the rest is at dstorig. + // Recompute the offset for the next check. + MOVD dstorig, match + SUB dstorig, dst, offset + +copyMatchTry8: + // Copy doublewords if both len and offset are at least eight. + // A 16-at-a-time loop doesn't provide a further speedup. + CMP $8, len + CCMP HS, offset, $8, $0 + BLO copyMatchTry4 + + AND $7, len, lenRem + SUB $8, len +copyMatchLoop8: + MOVD.P 8(match), tmp1 + MOVD.P tmp1, 8(dst) + SUBS $8, len + BPL copyMatchLoop8 + + MOVD (match)(len), tmp2 // match+len == match+lenRem-8. + ADD lenRem, dst + MOVD $0, len + MOVD tmp2, -8(dst) + B copyMatchDone + +copyMatchTry4: + // Copy words if both len and offset are at least four. + CMP $4, len + CCMP HS, offset, $4, $0 + BLO copyMatchLoop1 + + MOVWU.P 4(match), tmp2 + MOVWU.P tmp2, 4(dst) + SUBS $4, len + BEQ copyMatchDone + +copyMatchLoop1: + // Byte-at-a-time copy for small offsets <= 3. + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + SUBS $1, len + BNE copyMatchLoop1 + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CBNZ len, corrupt + SUB dstorig, dst, tmp1 + MOVD tmp1, ret+72(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. 
+shortDict: +shortDst: +shortSrc: +corrupt: + MOVD $-1, tmp1 + MOVD tmp1, ret+72(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go new file mode 100644 index 000000000..8d9023d10 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go @@ -0,0 +1,10 @@ +//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm +// +build amd64 arm arm64 +// +build !appengine +// +build gc +// +build !noasm + +package lz4block + +//go:noescape +func decodeBlock(dst, src, dict []byte) int diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go new file mode 100644 index 000000000..9f568fbb1 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go @@ -0,0 +1,139 @@ +//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm,!arm64 appengine !gc noasm + +package lz4block + +import ( + "encoding/binary" +) + +func decodeBlock(dst, src, dict []byte) (ret int) { + // Restrict capacities so we don't read or write out of bounds. + dst = dst[:len(dst):len(dst)] + src = src[:len(src):len(src)] + + const hasError = -2 + + if len(src) == 0 { + return hasError + } + + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di uint + for si < uint(len(src)) { + // Literals and match lengths (token). + b := uint(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < uint(len(src)): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. 
+ copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := u16(src[si:]); mLen <= offset && offset < di { + i := di - offset + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + if end := i + 18; end <= uint(len(dst)) { + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + } + case lLen == 0xF: + for { + x := uint(src[si]) + if lLen += x; int(lLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + + mLen := b & 0xF + if si == uint(len(src)) && mLen == 0 { + break + } else if si >= uint(len(src)) { + return hasError + } + + offset := u16(src[si:]) + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen += minMatch + if mLen == minMatch+0xF { + for { + x := uint(src[si]) + if mLen += x; int(mLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + } + + // Copy the match. + if di < offset { + // The match is beyond our block, meaning the first part + // is in the dictionary. + fromDict := dict[uint(len(dict))+di-offset:] + n := uint(copy(dst[di:di+mLen], fromDict)) + di += n + if mLen -= n; mLen == 0 { + continue + } + // We copied n = offset-di bytes from the dictionary, + // then set di = di+n = offset, so the following code + // copies from dst[di-offset:] = dst[0:]. + } + + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
+ bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += uint(copy(dst[di:di+mLen], expanded[:mLen])) + } + + return int(di) +} + +func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go new file mode 100644 index 000000000..710ea4281 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go @@ -0,0 +1,19 @@ +package lz4errors + +type Error string + +func (e Error) Error() string { return string(e) } + +const ( + ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short" + ErrInvalidFrame Error = "lz4: bad magic number" + ErrInternalUnhandledState Error = "lz4: unhandled state" + ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum" + ErrInvalidBlockChecksum Error = "lz4: invalid block checksum" + ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum" + ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level" + ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object" + ErrOptionInvalidBlockSize Error = "lz4: invalid block size" + ErrOptionNotApplicable Error = "lz4: option not applicable" + ErrWriterNotClosed Error = "lz4: writer not closed" +) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go new file mode 100644 index 000000000..459086f09 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -0,0 +1,350 @@ +package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "sync" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +type Blocks struct { + 
Block *FrameDataBlock + Blocks chan chan *FrameDataBlock + mu sync.Mutex + err error +} + +func (b *Blocks) initW(f *Frame, dst io.Writer, num int) { + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return + } + b.Block = nil + if cap(b.Blocks) != num { + b.Blocks = make(chan chan *FrameDataBlock, num) + } + // goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range b.Blocks { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + block := <-c + if block == nil { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Do not attempt to write the block upon any previous failure. + if b.err == nil { + // Write the block. + if err := block.Write(f, dst); err != nil { + // Keep the first error. + b.err = err + // All pending compression goroutines need to shut down, so we need to keep going. + } + } + close(c) + } + }() +} + +func (b *Blocks) close(f *Frame, num int) error { + if num == 1 { + if b.Block != nil { + b.Block.Close(f) + } + err := b.err + b.err = nil + return err + } + if b.Blocks == nil { + err := b.err + b.err = nil + return err + } + c := make(chan *FrameDataBlock) + b.Blocks <- c + c <- nil + <-c + err := b.err + b.err = nil + return err +} + +// ErrorR returns any error set while uncompressing a stream. +func (b *Blocks) ErrorR() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.err +} + +// initR returns a channel that streams the uncompressed blocks if in concurrent +// mode and no error. When the channel is closed, check for any error with b.ErrorR. 
+// +// If not in concurrent mode, the uncompressed block is b.Block and the returned error +// needs to be checked. +func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) { + size := f.Descriptor.Flags.BlockSizeIndex() + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return nil, nil + } + b.Block = nil + blocks := make(chan chan []byte, num) + // data receives the uncompressed blocks. + data := make(chan []byte) + // Read blocks from the source sequentially + // and uncompress them concurrently. + + // In legacy mode, accrue the uncompress sizes in cum. + var cum uint32 + go func() { + var cumx uint32 + var err error + for b.ErrorR() == nil { + block := NewFrameDataBlock(f) + cumx, err = block.Read(f, src, 0) + if err != nil { + block.Close(f) + break + } + // Recheck for an error as reading may be slow and uncompressing is expensive. + if b.ErrorR() != nil { + block.Close(f) + break + } + c := make(chan []byte) + blocks <- c + go func() { + defer block.Close(f) + data, err := block.Uncompress(f, size.Get(), nil, false) + if err != nil { + b.closeR(err) + // Close the block channel to indicate an error. + close(c) + } else { + c <- data + } + }() + } + // End the collection loop and the data channel. + c := make(chan []byte) + blocks <- c + c <- nil // signal the collection loop that we are done + <-c // wait for the collect loop to complete + if f.isLegacy() && cum == cumx { + err = io.EOF + } + b.closeR(err) + close(data) + }() + // Collect the uncompressed blocks and make them available + // on the returned channel. + go func(leg bool) { + defer close(blocks) + skipBlocks := false + for c := range blocks { + buf, ok := <-c + if !ok { + // A closed channel indicates an error. + // All remaining channels should be discarded. + skipBlocks = true + continue + } + if buf == nil { + // Signal to end the loop. + close(c) + return + } + if skipBlocks { + // A previous error has occurred, skipping remaining channels. 
+ continue + } + // Perform checksum now as the blocks are received in order. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(buf) + } + if leg { + cum += uint32(len(buf)) + } + data <- buf + close(c) + } + }(f.isLegacy()) + return data, nil +} + +// closeR safely sets the error on b if not already set. +func (b *Blocks) closeR(err error) { + b.mu.Lock() + if b.err == nil { + b.err = err + } + b.mu.Unlock() +} + +func NewFrameDataBlock(f *Frame) *FrameDataBlock { + buf := f.Descriptor.Flags.BlockSizeIndex().Get() + return &FrameDataBlock{Data: buf, data: buf} +} + +type FrameDataBlock struct { + Size DataBlockSize + Data []byte // compressed or uncompressed data (.data or .src) + Checksum uint32 + data []byte // buffer for compressed data + src []byte // uncompressed data + err error // used in concurrent mode +} + +func (b *FrameDataBlock) Close(f *Frame) { + b.Size = 0 + b.Checksum = 0 + b.err = nil + if b.data != nil { + // Block was not already closed. + lz4block.Put(b.data) + b.Data = nil + b.data = nil + b.src = nil + } +} + +// Block compression errors are ignored since the buffer is sized appropriately. +func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { + data := b.data + if f.isLegacy() { + // In legacy mode, the buffer is sized according to CompressBlockBound, + // but only 8Mb is buffered for compression. 
+ src = src[:8<<20] + } else { + data = data[:len(src)] // trigger the incompressible flag in CompressBlock + } + var n int + switch level { + case lz4block.Fast: + n, _ = lz4block.CompressBlock(src, data) + default: + n, _ = lz4block.CompressBlockHC(src, data, level) + } + if n == 0 { + b.Size.UncompressedSet(true) + b.Data = src + } else { + b.Size.UncompressedSet(false) + b.Data = data[:n] + } + b.Size.sizeSet(len(b.Data)) + b.src = src // keep track of the source for content checksum + + if f.Descriptor.Flags.BlockChecksum() { + b.Checksum = xxh32.ChecksumZero(src) + } + return b +} + +func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error { + // Write is called in the same order as blocks are compressed, + // so content checksum must be done here. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(b.src) + } + buf := f.buf[:] + binary.LittleEndian.PutUint32(buf, uint32(b.Size)) + if _, err := dst.Write(buf[:4]); err != nil { + return err + } + + if _, err := dst.Write(b.Data); err != nil { + return err + } + + if b.Checksum == 0 { + return nil + } + binary.LittleEndian.PutUint32(buf, b.Checksum) + _, err := dst.Write(buf[:4]) + return err +} + +// Read updates b with the next block data, size and checksum if available. +func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) { + x, err := f.readUint32(src) + if err != nil { + return 0, err + } + if f.isLegacy() { + switch x { + case frameMagicLegacy: + // Concatenated legacy frame. + return b.Read(f, src, cum) + case cum: + // Only works in non concurrent mode, for concurrent mode + // it is handled separately. + // Linux kernel format appends the total uncompressed size at the end. + return 0, io.EOF + } + } else if x == 0 { + // Marker for end of stream. 
+ return 0, io.EOF + } + b.Size = DataBlockSize(x) + + size := b.Size.size() + if size > cap(b.data) { + return x, lz4errors.ErrOptionInvalidBlockSize + } + b.data = b.data[:size] + if _, err := io.ReadFull(src, b.data); err != nil { + return x, err + } + if f.Descriptor.Flags.BlockChecksum() { + sum, err := f.readUint32(src) + if err != nil { + return 0, err + } + b.Checksum = sum + } + return x, nil +} + +func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) { + if b.Size.Uncompressed() { + n := copy(dst, b.data) + dst = dst[:n] + } else { + n, err := lz4block.UncompressBlock(b.data, dst, dict) + if err != nil { + return nil, err + } + dst = dst[:n] + } + if f.Descriptor.Flags.BlockChecksum() { + if c := xxh32.ChecksumZero(dst); c != b.Checksum { + err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum) + return nil, err + } + } + if sum && f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(dst) + } + return dst, nil +} + +func (f *Frame) readUint32(r io.Reader) (x uint32, err error) { + if _, err = io.ReadFull(r, f.buf[:4]); err != nil { + return + } + x = binary.LittleEndian.Uint32(f.buf[:4]) + return +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go new file mode 100644 index 000000000..18192a943 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go @@ -0,0 +1,204 @@ +// Package lz4stream provides the types that support reading and writing LZ4 data streams. 
+package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +//go:generate go run gen.go + +const ( + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + frameMagicLegacy uint32 = 0x184C2102 +) + +func NewFrame() *Frame { + return &Frame{} +} + +type Frame struct { + buf [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes + Magic uint32 + Descriptor FrameDescriptor + Blocks Blocks + Checksum uint32 + checksum xxh32.XXHZero +} + +// Reset allows reusing the Frame. +// The Descriptor configuration is not modified. +func (f *Frame) Reset(num int) { + f.Magic = 0 + f.Descriptor.Checksum = 0 + f.Descriptor.ContentSize = 0 + _ = f.Blocks.close(f, num) + f.Checksum = 0 +} + +func (f *Frame) InitW(dst io.Writer, num int, legacy bool) { + if legacy { + f.Magic = frameMagicLegacy + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + } else { + f.Magic = frameMagic + f.Descriptor.initW() + } + f.Blocks.initW(f, dst, num) + f.checksum.Reset() +} + +func (f *Frame) CloseW(dst io.Writer, num int) error { + if err := f.Blocks.close(f, num); err != nil { + return err + } + if f.isLegacy() { + return nil + } + buf := f.buf[:0] + // End mark (data block size of uint32(0)). + buf = append(buf, 0, 0, 0, 0) + if f.Descriptor.Flags.ContentChecksum() { + buf = f.checksum.Sum(buf) + } + _, err := dst.Write(buf) + return err +} + +func (f *Frame) isLegacy() bool { + return f.Magic == frameMagicLegacy +} + +func (f *Frame) ParseHeaders(src io.Reader) error { + if f.Magic > 0 { + // Header already read. + return nil + } + +newFrame: + var err error + if f.Magic, err = f.readUint32(src); err != nil { + return err + } + switch m := f.Magic; { + case m == frameMagic || m == frameMagicLegacy: + // All 16 values of frameSkipMagic are valid. 
+ case m>>8 == frameSkipMagic>>8: + skip, err := f.readUint32(src) + if err != nil { + return err + } + if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil { + return err + } + goto newFrame + default: + return lz4errors.ErrInvalidFrame + } + if err := f.Descriptor.initR(f, src); err != nil { + return err + } + f.checksum.Reset() + return nil +} + +func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) { + return f.Blocks.initR(f, num, src) +} + +func (f *Frame) CloseR(src io.Reader) (err error) { + if f.isLegacy() { + return nil + } + if !f.Descriptor.Flags.ContentChecksum() { + return nil + } + if f.Checksum, err = f.readUint32(src); err != nil { + return err + } + if c := f.checksum.Sum32(); c != f.Checksum { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum) + } + return nil +} + +type FrameDescriptor struct { + Flags DescriptorFlags + ContentSize uint64 + Checksum uint8 +} + +func (fd *FrameDescriptor) initW() { + fd.Flags.VersionSet(1) + fd.Flags.BlockIndependenceSet(true) +} + +func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error { + if fd.Checksum > 0 { + // Header already written. + return nil + } + + buf := f.buf[:4] + // Write the magic number here even though it belongs to the Frame. + binary.LittleEndian.PutUint32(buf, f.Magic) + if !f.isLegacy() { + buf = buf[:4+2] + binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags)) + + if fd.Flags.Size() { + buf = buf[:4+2+8] + binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize) + } + fd.Checksum = descriptorChecksum(buf[4:]) + buf = append(buf, fd.Checksum) + } + + _, err := dst.Write(buf) + return err +} + +func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error { + if f.isLegacy() { + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + return nil + } + // Read the flags and the checksum, hoping that there is not content size. 
+ buf := f.buf[:3] + if _, err := io.ReadFull(src, buf); err != nil { + return err + } + descr := binary.LittleEndian.Uint16(buf) + fd.Flags = DescriptorFlags(descr) + if fd.Flags.Size() { + // Append the 8 missing bytes. + buf = buf[:3+8] + if _, err := io.ReadFull(src, buf[3:]); err != nil { + return err + } + fd.ContentSize = binary.LittleEndian.Uint64(buf[2:]) + } + fd.Checksum = buf[len(buf)-1] // the checksum is the last byte + buf = buf[:len(buf)-1] // all descriptor fields except checksum + if c := descriptorChecksum(buf); fd.Checksum != c { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum) + } + // Validate the elements that can be. + if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() { + return lz4errors.ErrOptionInvalidBlockSize + } + return nil +} + +func descriptorChecksum(buf []byte) byte { + return byte(xxh32.ChecksumZero(buf) >> 8) +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go new file mode 100644 index 000000000..d33a6be95 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go @@ -0,0 +1,103 @@ +// Code generated by `gen.exe`. DO NOT EDIT. + +package lz4stream + +import "github.com/pierrec/lz4/v4/internal/lz4block" + +// DescriptorFlags is defined as follow: +// field bits +// ----- ---- +// _ 2 +// ContentChecksum 1 +// Size 1 +// BlockChecksum 1 +// BlockIndependence 1 +// Version 2 +// _ 4 +// BlockSizeIndex 3 +// _ 1 +type DescriptorFlags uint16 + +// Getters. 
+func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 } +func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 } +func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 } +func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 } +func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) } +func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex { + return lz4block.BlockSizeIndex(x >> 12 & 0x7) +} + +// Setters. +func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 2 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags { + const b = 1 << 3 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 4 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags { + const b = 1 << 5 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags { + *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6) + return x +} +func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags { + *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12) + return x +} + +// Code generated by `gen.exe`. DO NOT EDIT. + +// DataBlockSize is defined as follow: +// field bits +// ----- ---- +// size 31 +// Uncompressed 1 +type DataBlockSize uint32 + +// Getters. +func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) } +func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 } + +// Setters. 
+func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
+	*x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
+	return x
+}
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
+	const b = 1 << 31
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
new file mode 100644
index 000000000..651d10c10
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,212 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
+// (ported from the reference implementation https://github.com/Cyan4973/xxHash/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
+
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v        [4]uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v[0] = prime1plus2
+	xxh.v[1] = prime2
+	xxh.v[2] = 0
+	xxh.v[3] = prime1minus
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. +func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + var buf *[16]byte + if m != 0 { + // some data left from previous update + buf = &xxh.buf + c := copy(buf[m:], input) + n -= c + input = input[c:] + } + update(&xxh.v, buf, input) + xxh.bufused = copy(xxh.buf[:], input[n-n%16:]) + + return n, nil +} + +// Portable version of update. This updates v by processing all of buf +// (if not nil) and all full 16-byte blocks of input. +func updateGo(v *[4]uint32, buf *[16]byte, input []byte) { + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := v[0], v[1], v[2], v[3] + + if buf != nil { + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + } + + for ; len(input) >= 16; input = input[16:] { + sub := input[:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + v[0], v[1], v[2], v[3] = v1, v2, v3, v4 +} + +// Sum32 returns the 32 bits Hash value. 
+func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3]) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Portable version of ChecksumZero. +func checksumZeroGo(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} 
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go new file mode 100644 index 000000000..0978b2665 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go @@ -0,0 +1,11 @@ +// +build !noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +// +//go:noescape +func ChecksumZero(input []byte) uint32 + +//go:noescape +func update(v *[4]uint32, buf *[16]byte, input []byte) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s new file mode 100644 index 000000000..c18ffd574 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s @@ -0,0 +1,251 @@ +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define p R0 +#define n R1 +#define h R2 +#define v1 R2 // Alias for h. +#define v2 R3 +#define v3 R4 +#define v4 R5 +#define x1 R6 +#define x2 R7 +#define x3 R8 +#define x4 R9 + +// We need the primes in registers. The 16-byte loop only uses prime{1,2}. +#define prime1r R11 +#define prime2r R12 +#define prime3r R3 // The rest can alias v{2-4}. +#define prime4r R4 +#define prime5r R5 + +// Update round macros. These read from and increment p. 
+ +#define round16aligned \ + MOVM.IA.W (p), [x1, x2, x3, x4] \ + \ + MULA x1, prime2r, v1, v1 \ + MULA x2, prime2r, v2, v2 \ + MULA x3, prime2r, v3, v3 \ + MULA x4, prime2r, v4, v4 \ + \ + MOVW v1 @> 19, v1 \ + MOVW v2 @> 19, v2 \ + MOVW v3 @> 19, v3 \ + MOVW v4 @> 19, v4 \ + \ + MUL prime1r, v1 \ + MUL prime1r, v2 \ + MUL prime1r, v3 \ + MUL prime1r, v4 \ + +#define round16unaligned \ + MOVBU.P 16(p), x1 \ + MOVBU -15(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -14(p), x3 \ + MOVBU -13(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v1, v1 \ + MOVW v1 @> 19, v1 \ + MUL prime1r, v1 \ + \ + MOVBU -12(p), x1 \ + MOVBU -11(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -10(p), x3 \ + MOVBU -9(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v2, v2 \ + MOVW v2 @> 19, v2 \ + MUL prime1r, v2 \ + \ + MOVBU -8(p), x1 \ + MOVBU -7(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -6(p), x3 \ + MOVBU -5(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v3, v3 \ + MOVW v3 @> 19, v3 \ + MUL prime1r, v3 \ + \ + MOVBU -4(p), x1 \ + MOVBU -3(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -2(p), x3 \ + MOVBU -1(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v4, v4 \ + MOVW v4 @> 19, v4 \ + MUL prime1r, v4 \ + + +// func ChecksumZero([]byte) uint32 +TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16 + MOVW input_base+0(FP), p + MOVW input_len+4(FP), n + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Set up h for n < 16. It's tempting to say {ADD prime5, n, h} + // here, but that's a pseudo-op that generates a load through R11. + MOVW $const_prime5, prime5r + ADD prime5r, n, h + CMP $0, n + BEQ end + + // We let n go negative so we can do comparisons with SUB.S + // instead of separate CMP. 
+ SUB.S $16, n + BMI loop16done + + ADD prime1r, prime2r, v1 + MOVW prime2r, v2 + MOVW $0, v3 + RSB $0, prime1r, v4 + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B loop16finish + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +loop16finish: + MOVW v1 @> 31, h + ADD v2 @> 25, h + ADD v3 @> 20, h + ADD v4 @> 14, h + + // h += len(input) with v2 as temporary. + MOVW input_len+4(FP), v2 + ADD v2, h + +loop16done: + ADD $16, n // Restore number of bytes left. + + SUB.S $4, n + MOVW $const_prime3, prime3r + BMI loop4done + MOVW $const_prime4, prime4r + + TST $3, p + BNE loop4unaligned + +loop4aligned: + SUB.S $4, n + + MOVW.P 4(p), x1 + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4aligned + B loop4done + +loop4unaligned: + SUB.S $4, n + + MOVBU.P 4(p), x1 + MOVBU -3(p), x2 + ORR x2 << 8, x1 + MOVBU -2(p), x3 + ORR x3 << 16, x1 + MOVBU -1(p), x4 + ORR x4 << 24, x1 + + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4unaligned + +loop4done: + ADD.S $4, n // Restore number of bytes left. + BEQ end + + MOVW $const_prime5, prime5r + +loop1: + SUB.S $1, n + + MOVBU.P 1(p), x1 + MULA prime5r, x1, h, h + MOVW h @> 21, h + MUL prime1r, h + + BNE loop1 + +end: + MOVW $const_prime3, prime3r + EOR h >> 15, h + MUL prime2r, h + EOR h >> 13, h + MUL prime3r, h + EOR h >> 16, h + + MOVW h, ret+12(FP) + RET + + +// func update(v *[4]uint64, buf *[16]byte, p []byte) +TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20 + MOVW v+0(FP), p + MOVM.IA (p), [v1, v2, v3, v4] + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Process buf, if not nil. 
+ MOVW buf+4(FP), p + CMP $0, p + BEQ noBuffered + + round16aligned + +noBuffered: + MOVW input_base +8(FP), p + MOVW input_len +12(FP), n + + SUB.S $16, n + BMI end + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B end + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +end: + MOVW v+0(FP), p + MOVM.IA [v1, v2, v3, v4], (p) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go new file mode 100644 index 000000000..c96b59b8c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go @@ -0,0 +1,10 @@ +// +build !arm noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) } + +func update(v *[4]uint32, buf *[16]byte, input []byte) { + updateGo(v, buf, input) +} diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go new file mode 100644 index 000000000..a62022e08 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/lz4.go @@ -0,0 +1,157 @@ +// Package lz4 implements reading and writing lz4 compressed data. +// +// The package supports both the LZ4 stream format, +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +// and the LZ4 block format, defined at +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html. +// +// See https://github.com/lz4/lz4 for the reference C implementation. +package lz4 + +import ( + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +func _() { + // Safety checks for duplicated elements. 
+ var x [1]struct{} + _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast] + _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)] + _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)] + _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)] + _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)] +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return lz4block.CompressBlockBound(n) +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, nil) +} + +// UncompressBlockWithDict uncompresses the source buffer into the destination one using a +// dictionary, and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlockWithDict(src, dst, dict []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, dict) +} + +// A Compressor compresses data into the LZ4 block format. +// It uses a fast compression algorithm. +// +// A Compressor is not safe for concurrent use by multiple goroutines. +// +// Use a Writer to compress into the LZ4 stream format. +type Compressor struct{ c lz4block.Compressor } + +// CompressBlock compresses the source buffer src into the destination dst. +// +// If compression is successful, the first return value is the size of the +// compressed data, which is always >0. +// +// If dst has length at least CompressBlockBound(len(src)), compression always +// succeeds. Otherwise, the first return value is zero. 
The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst)
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The argument hashTable is scratch space for a hash table used by the
+// compressor. If provided, it should have length at least 1<<16. If it is
+// shorter (or nil), CompressBlock allocates its own hash table.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+
+// CompressBlock is equivalent to Compressor.CompressBlock.
+// The final argument is ignored and should be set to nil.
+//
+// This function is deprecated. Use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+	return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A CompressorHC is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+	// Level is the maximum search depth for compression.
+	// Values <= 0 mean no maximum.
+	Level CompressionLevel
+	c     lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+// +// If compression is successful, the first return value is the size of the +// compressed data, which is always >0. +// +// If dst has length at least CompressBlockBound(len(src)), compression always +// succeeds. Otherwise, the first return value is zero. The error return is +// non-nil if the compressed data does not fit in dst, but it might fit in a +// larger buffer that is still smaller than CompressBlockBound(len(src)). The +// return value (0, nil) means the data is likely incompressible and a buffer +// of length CompressBlockBound(len(src)) should be passed in. +func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) { + return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level)) +} + +// CompressBlockHC is equivalent to CompressorHC.CompressBlock. +// The final two arguments are ignored and should be set to nil. +// +// This function is deprecated. Use a CompressorHC instead. +func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) { + return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth)) +} + +const ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer + // ErrInvalidFrame is returned when reading an invalid LZ4 archive. + ErrInvalidFrame = lz4errors.ErrInvalidFrame + // ErrInternalUnhandledState is an internal error. + ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState + // ErrInvalidHeaderChecksum is returned when reading a frame. + ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum + // ErrInvalidBlockChecksum is returned when reading a frame. + ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum + // ErrInvalidFrameChecksum is returned when reading a frame. 
+ ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum + // ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid. + ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel + // ErrOptionClosedOrError is returned when an option is applied to a closed or in error object. + ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError + // ErrOptionInvalidBlockSize is returned when + ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize + // ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it. + ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable + // ErrWriterNotClosed is returned when attempting to reset an unclosed writer. + ErrWriterNotClosed = lz4errors.ErrWriterNotClosed +) diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go new file mode 100644 index 000000000..46a873803 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -0,0 +1,214 @@ +package lz4 + +import ( + "fmt" + "reflect" + "runtime" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go + +type ( + applier interface { + Apply(...Option) error + private() + } + // Option defines the parameters to setup an LZ4 Writer or Reader. + Option func(applier) error +) + +// String returns a string representation of the option with its parameter(s). +func (o Option) String() string { + return o(nil).Error() +} + +// Default options. 
+var ( + DefaultBlockSizeOption = BlockSizeOption(Block4Mb) + DefaultChecksumOption = ChecksumOption(true) + DefaultConcurrency = ConcurrencyOption(1) + defaultOnBlockDone = OnBlockDoneOption(nil) +) + +const ( + Block64Kb BlockSize = 1 << (16 + iota*2) + Block256Kb + Block1Mb + Block4Mb +) + +// BlockSizeIndex defines the size of the blocks to be compressed. +type BlockSize uint32 + +// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb). +func BlockSizeOption(size BlockSize) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("BlockSizeOption(%s)", size) + return lz4errors.Error(s) + case *Writer: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// BlockChecksumOption enables or disables block checksum (default=false). +func BlockChecksumOption(flag bool) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("BlockChecksumOption(%v)", flag) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// ChecksumOption enables/disables all blocks or content checksum (default=true). +func ChecksumOption(flag bool) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("ChecksumOption(%v)", flag) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the +// whole uncompressed data stream. 
+func SizeOption(size uint64) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("SizeOption(%d)", size) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// ConcurrencyOption sets the number of go routines used for compression. +// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used. +func ConcurrencyOption(n int) Option { + if n <= 0 { + n = runtime.GOMAXPROCS(0) + } + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("ConcurrencyOption(%d)", n) + return lz4errors.Error(s) + case *Writer: + rw.num = n + return nil + case *Reader: + rw.num = n + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// CompressionLevel defines the level of compression to use. The higher the better, but slower, compression. +type CompressionLevel uint32 + +const ( + Fast CompressionLevel = 0 + Level1 CompressionLevel = 1 << (8 + iota) + Level2 + Level3 + Level4 + Level5 + Level6 + Level7 + Level8 + Level9 +) + +// CompressionLevelOption defines the compression level (default=Fast). +func CompressionLevelOption(level CompressionLevel) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("CompressionLevelOption(%s)", level) + return lz4errors.Error(s) + case *Writer: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +func onBlockDone(int) {} + +// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when is has been compressed, +// for a Reader, it is when it has been uncompressed. 
+func OnBlockDoneOption(handler func(size int)) Option { + if handler == nil { + handler = onBlockDone + } + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String()) + return lz4errors.Error(s) + case *Writer: + rw.handler = handler + return nil + case *Reader: + rw.handler = handler + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// LegacyOption provides support for writing LZ4 frames in the legacy format. +// +// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame. +// +// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where +// the compressed stream is followed by the original (uncompressed) size of +// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf). +// This is also supported as a special case. +func LegacyOption(legacy bool) Option { + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("LegacyOption(%v)", legacy) + return lz4errors.Error(s) + case *Writer: + rw.legacy = legacy + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go new file mode 100644 index 000000000..2de814909 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go @@ -0,0 +1,92 @@ +// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT. + +package lz4 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Block64Kb-65536] + _ = x[Block256Kb-262144] + _ = x[Block1Mb-1048576] + _ = x[Block4Mb-4194304] +} + +const ( + _BlockSize_name_0 = "Block64Kb" + _BlockSize_name_1 = "Block256Kb" + _BlockSize_name_2 = "Block1Mb" + _BlockSize_name_3 = "Block4Mb" +) + +func (i BlockSize) String() string { + switch { + case i == 65536: + return _BlockSize_name_0 + case i == 262144: + return _BlockSize_name_1 + case i == 1048576: + return _BlockSize_name_2 + case i == 4194304: + return _BlockSize_name_3 + default: + return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Fast-0] + _ = x[Level1-512] + _ = x[Level2-1024] + _ = x[Level3-2048] + _ = x[Level4-4096] + _ = x[Level5-8192] + _ = x[Level6-16384] + _ = x[Level7-32768] + _ = x[Level8-65536] + _ = x[Level9-131072] +} + +const ( + _CompressionLevel_name_0 = "Fast" + _CompressionLevel_name_1 = "Level1" + _CompressionLevel_name_2 = "Level2" + _CompressionLevel_name_3 = "Level3" + _CompressionLevel_name_4 = "Level4" + _CompressionLevel_name_5 = "Level5" + _CompressionLevel_name_6 = "Level6" + _CompressionLevel_name_7 = "Level7" + _CompressionLevel_name_8 = "Level8" + _CompressionLevel_name_9 = "Level9" +) + +func (i CompressionLevel) String() string { + switch { + case i == 0: + return _CompressionLevel_name_0 + case i == 512: + return _CompressionLevel_name_1 + case i == 1024: + return _CompressionLevel_name_2 + case i == 2048: + return _CompressionLevel_name_3 + case i == 4096: + return _CompressionLevel_name_4 + case i == 8192: + return _CompressionLevel_name_5 + case i == 16384: + return _CompressionLevel_name_6 + case i == 32768: + return _CompressionLevel_name_7 + case i == 65536: + return _CompressionLevel_name_8 + case i == 131072: + return _CompressionLevel_name_9 + default: + return 
"CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go new file mode 100644 index 000000000..275daad7c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/reader.go @@ -0,0 +1,275 @@ +package lz4 + +import ( + "bytes" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var readerStates = []aState{ + noState: newState, + errorState: newState, + newState: readState, + readState: closedState, + closedState: newState, +} + +// NewReader returns a new LZ4 frame decoder. +func NewReader(r io.Reader) *Reader { + return newReader(r, false) +} + +func newReader(r io.Reader, legacy bool) *Reader { + zr := &Reader{frame: lz4stream.NewFrame()} + zr.state.init(readerStates) + _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone) + zr.Reset(r) + return zr +} + +// Reader allows reading an LZ4 stream. +type Reader struct { + state _State + src io.Reader // source reader + num int // concurrency level + frame *lz4stream.Frame // frame being read + data []byte // block buffer allocated in non concurrent mode + reads chan []byte // pending data + idx int // size of pending data + handler func(int) + cum uint32 + dict []byte +} + +func (*Reader) private() {} + +func (r *Reader) Apply(options ...Option) (err error) { + defer r.state.check(&err) + switch r.state.state { + case newState: + case errorState: + return r.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + for _, o := range options { + if err = o(r); err != nil { + return + } + } + return +} + +// Size returns the size of the underlying uncompressed data, if set in the stream. 
+func (r *Reader) Size() int { + switch r.state.state { + case readState, closedState: + if r.frame.Descriptor.Flags.Size() { + return int(r.frame.Descriptor.ContentSize) + } + } + return 0 +} + +func (r *Reader) isNotConcurrent() bool { + return r.num == 1 +} + +func (r *Reader) init() error { + err := r.frame.ParseHeaders(r.src) + if err != nil { + return err + } + if !r.frame.Descriptor.Flags.BlockIndependence() { + // We can't decompress dependent blocks concurrently. + // Instead of throwing an error to the user, silently drop concurrency + r.num = 1 + } + data, err := r.frame.InitR(r.src, r.num) + if err != nil { + return err + } + r.reads = data + r.idx = 0 + size := r.frame.Descriptor.Flags.BlockSizeIndex() + r.data = size.Get() + r.cum = 0 + return nil +} + +func (r *Reader) Read(buf []byte) (n int, err error) { + defer r.state.check(&err) + switch r.state.state { + case readState: + case closedState, errorState: + return 0, r.state.err + case newState: + // First initialization. + if err = r.init(); r.state.next(err) { + return + } + default: + return 0, r.state.fail() + } + for len(buf) > 0 { + var bn int + if r.idx == 0 { + if r.isNotConcurrent() { + bn, err = r.read(buf) + } else { + lz4block.Put(r.data) + r.data = <-r.reads + if len(r.data) == 0 { + // No uncompressed data: something went wrong or we are done. + err = r.frame.Blocks.ErrorR() + } + } + switch err { + case nil: + case io.EOF: + if er := r.frame.CloseR(r.src); er != nil { + err = er + } + lz4block.Put(r.data) + r.data = nil + return + default: + return + } + } + if bn == 0 { + // Fill buf with buffered data. + bn = copy(buf, r.data[r.idx:]) + r.idx += bn + if r.idx == len(r.data) { + // All data read, get ready for the next Read. 
+ r.idx = 0 + } + } + buf = buf[bn:] + n += bn + r.handler(bn) + } + return +} + +// read uncompresses the next block as follow: +// - if buf has enough room, the block is uncompressed into it directly +// and the lenght of used space is returned +// - else, the uncompress data is stored in r.data and 0 is returned +func (r *Reader) read(buf []byte) (int, error) { + block := r.frame.Blocks.Block + _, err := block.Read(r.frame, r.src, r.cum) + if err != nil { + return 0, err + } + var direct bool + dst := r.data[:cap(r.data)] + if len(buf) >= len(dst) { + // Uncompress directly into buf. + direct = true + dst = buf + } + dst, err = block.Uncompress(r.frame, dst, r.dict, true) + if err != nil { + return 0, err + } + if !r.frame.Descriptor.Flags.BlockIndependence() { + if len(r.dict)+len(dst) > 128*1024 { + preserveSize := 64*1024 - len(dst) + if preserveSize < 0 { + preserveSize = 0 + } + r.dict = r.dict[len(r.dict)-preserveSize:] + } + r.dict = append(r.dict, dst...) + } + r.cum += uint32(len(dst)) + if direct { + return len(dst), nil + } + r.data = dst + return 0, nil +} + +// Reset clears the state of the Reader r such that it is equivalent to its +// initial state from NewReader, but instead reading from reader. +// No access to reader is performed. +func (r *Reader) Reset(reader io.Reader) { + if r.data != nil { + lz4block.Put(r.data) + r.data = nil + } + r.frame.Reset(r.num) + r.state.reset() + r.src = reader + r.reads = nil +} + +// WriteTo efficiently uncompresses the data from the Reader underlying source to w. 
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) { + switch r.state.state { + case closedState, errorState: + return 0, r.state.err + case newState: + if err = r.init(); r.state.next(err) { + return + } + default: + return 0, r.state.fail() + } + defer r.state.nextd(&err) + + var data []byte + if r.isNotConcurrent() { + size := r.frame.Descriptor.Flags.BlockSizeIndex() + data = size.Get() + defer lz4block.Put(data) + } + for { + var bn int + var dst []byte + if r.isNotConcurrent() { + bn, err = r.read(data) + dst = data[:bn] + } else { + lz4block.Put(dst) + dst = <-r.reads + bn = len(dst) + if bn == 0 { + // No uncompressed data: something went wrong or we are done. + err = r.frame.Blocks.ErrorR() + } + } + switch err { + case nil: + case io.EOF: + err = r.frame.CloseR(r.src) + return + default: + return + } + r.handler(bn) + bn, err = w.Write(dst) + n += int64(bn) + if err != nil { + return + } + } +} + +// ValidFrameHeader returns a bool indicating if the given bytes slice matches a LZ4 header. 
+func ValidFrameHeader(in []byte) (bool, error) { + f := lz4stream.NewFrame() + err := f.ParseHeaders(bytes.NewReader(in)) + if err == nil { + return true, nil + } + if err == lz4errors.ErrInvalidFrame { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go new file mode 100644 index 000000000..d94f04d05 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state.go @@ -0,0 +1,75 @@ +package lz4 + +import ( + "errors" + "fmt" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go + +const ( + noState aState = iota // uninitialized reader + errorState // unrecoverable error encountered + newState // instantiated object + readState // reading data + writeState // writing data + closedState // all done +) + +type ( + aState uint8 + _State struct { + states []aState + state aState + err error + } +) + +func (s *_State) init(states []aState) { + s.states = states + s.state = states[0] +} + +func (s *_State) reset() { + s.state = s.states[0] + s.err = nil +} + +// next sets the state to the next one unless it is passed a non nil error. +// It returns whether or not it is in error. +func (s *_State) next(err error) bool { + if err != nil { + s.err = fmt.Errorf("%s: %w", s.state, err) + s.state = errorState + return true + } + s.state = s.states[s.state] + return false +} + +// nextd is like next but for defers. 
+func (s *_State) nextd(errp *error) bool { + return errp != nil && s.next(*errp) +} + +// check sets s in error if not already in error and if the error is not nil or io.EOF, +func (s *_State) check(errp *error) { + if s.state == errorState || errp == nil { + return + } + if err := *errp; err != nil { + s.err = fmt.Errorf("%w[%s]", err, s.state) + if !errors.Is(err, io.EOF) { + s.state = errorState + } + } +} + +func (s *_State) fail() error { + s.state = errorState + s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state) + return s.err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go new file mode 100644 index 000000000..75fb82892 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT. + +package lz4 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[noState-0] + _ = x[errorState-1] + _ = x[newState-2] + _ = x[readState-3] + _ = x[writeState-4] + _ = x[closedState-5] +} + +const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState" + +var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55} + +func (i aState) String() string { + if i >= aState(len(_aState_index)-1) { + return "aState(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _aState_name[_aState_index[i]:_aState_index[i+1]] +} diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go new file mode 100644 index 000000000..77699f2b5 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -0,0 +1,238 @@ +package lz4 + +import ( + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var writerStates = []aState{ + noState: newState, + newState: writeState, + writeState: closedState, + closedState: newState, + errorState: newState, +} + +// NewWriter returns a new LZ4 frame encoder. +func NewWriter(w io.Writer) *Writer { + zw := &Writer{frame: lz4stream.NewFrame()} + zw.state.init(writerStates) + _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone) + zw.Reset(w) + return zw +} + +// Writer allows writing an LZ4 stream. 
+type Writer struct { + state _State + src io.Writer // destination writer + level lz4block.CompressionLevel // how hard to try + num int // concurrency level + frame *lz4stream.Frame // frame being built + data []byte // pending data + idx int // size of pending data + handler func(int) + legacy bool +} + +func (*Writer) private() {} + +func (w *Writer) Apply(options ...Option) (err error) { + defer w.state.check(&err) + switch w.state.state { + case newState: + case errorState: + return w.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + w.Reset(w.src) + for _, o := range options { + if err = o(w); err != nil { + return + } + } + return +} + +func (w *Writer) isNotConcurrent() bool { + return w.num == 1 +} + +// init sets up the Writer when in newState. It does not change the Writer state. +func (w *Writer) init() error { + w.frame.InitW(w.src, w.num, w.legacy) + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() + w.idx = 0 + return w.frame.Descriptor.Write(w.frame, w.src) +} + +func (w *Writer) Write(buf []byte) (n int, err error) { + defer w.state.check(&err) + switch w.state.state { + case writeState: + case closedState, errorState: + return 0, w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return 0, w.state.fail() + } + + zn := len(w.data) + for len(buf) > 0 { + if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err = w.write(buf[:zn], false); err != nil { + return + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(w.data[w.idx:], buf) + n += m + w.idx += m + buf = buf[m:] + + if w.idx < len(w.data) { + // Buffer not filled. + return + } + + // Buffer full. 
+ if err = w.write(w.data, true); err != nil { + return + } + if !w.isNotConcurrent() { + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() + } + w.idx = 0 + } + return +} + +func (w *Writer) write(data []byte, safe bool) error { + if w.isNotConcurrent() { + block := w.frame.Blocks.Block + err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src) + w.handler(len(block.Data)) + return err + } + c := make(chan *lz4stream.FrameDataBlock) + w.frame.Blocks.Blocks <- c + go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) { + b := lz4stream.NewFrameDataBlock(w.frame) + c <- b.Compress(w.frame, data, w.level) + <-c + w.handler(len(b.Data)) + b.Close(w.frame) + if safe { + // safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed + lz4block.Put(data) + } + }(c, data, safe) + + return nil +} + +// Flush any buffered data to the underlying writer immediately. +func (w *Writer) Flush() (err error) { + switch w.state.state { + case writeState: + case errorState: + return w.state.err + default: + return nil + } + + if w.idx > 0 { + // Flush pending data, disable w.data freeing as it is done later on. + if err = w.write(w.data[:w.idx], false); err != nil { + return err + } + w.idx = 0 + } + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying writer +// without closing it. +func (w *Writer) Close() error { + if err := w.Flush(); err != nil { + return err + } + err := w.frame.CloseW(w.src, w.num) + // It is now safe to free the buffer. + if w.data != nil { + lz4block.Put(w.data) + w.data = nil + } + return err +} + +// Reset clears the state of the Writer w such that it is equivalent to its +// initial state from NewWriter, but instead writing to writer. +// Reset keeps the previous options unless overwritten by the supplied ones. +// No access to writer is performed. +// +// w.Close must be called before Reset or pending data may be dropped. 
+func (w *Writer) Reset(writer io.Writer) { + w.frame.Reset(w.num) + w.state.reset() + w.src = writer +} + +// ReadFrom efficiently reads from r and compressed into the Writer destination. +func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { + switch w.state.state { + case closedState, errorState: + return 0, w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return 0, w.state.fail() + } + defer w.state.check(&err) + + size := w.frame.Descriptor.Flags.BlockSizeIndex() + var done bool + var rn int + data := size.Get() + if w.isNotConcurrent() { + // Keep the same buffer for the whole process. + defer lz4block.Put(data) + } + for !done { + rn, err = io.ReadFull(r, data) + switch err { + case nil: + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + done = true + default: + return + } + n += int64(rn) + err = w.write(data[:rn], true) + if err != nil { + return + } + w.handler(rn) + if !done && !w.isNotConcurrent() { + // The buffer will be returned automatically by go routines (safe=true) + // so get a new one fo the next round. + data = size.Get() + } + } + return +} diff --git a/vendor/github.com/u-root/uio/rand/random.go b/vendor/github.com/u-root/uio/rand/random.go index 90e0a98f5..e189199b9 100644 --- a/vendor/github.com/u-root/uio/rand/random.go +++ b/vendor/github.com/u-root/uio/rand/random.go @@ -43,7 +43,7 @@ type contextReader interface { // ctxReader takes a contextReader and turns it into a ContextReader. 
type ctxReader struct { contextReader - ctx context.Context + ctx context.Context //nolint:containedctx } func (cr ctxReader) Read(b []byte) (int, error) { diff --git a/vendor/github.com/u-root/uio/rand/random_linux.go b/vendor/github.com/u-root/uio/rand/random_linux.go index d13db3581..42931cca0 100644 --- a/vendor/github.com/u-root/uio/rand/random_linux.go +++ b/vendor/github.com/u-root/uio/rand/random_linux.go @@ -49,14 +49,14 @@ func (r *getrandomReader) ReadContext(ctx context.Context, b []byte) (int, error // initialized. n, err := unix.Getrandom(b, unix.GRND_NONBLOCK) if err == nil { - return n, err + return n, nil } select { case <-ctx.Done(): return 0, ctx.Err() default: - if err != nil && err != syscall.EAGAIN && err != syscall.EINTR { + if err != syscall.EAGAIN && err != syscall.EINTR { return n, err } } diff --git a/vendor/github.com/u-root/uio/rand/random_urandom.go b/vendor/github.com/u-root/uio/rand/random_urandom.go index 6e0ea3dad..cd6e2639b 100644 --- a/vendor/github.com/u-root/uio/rand/random_urandom.go +++ b/vendor/github.com/u-root/uio/rand/random_urandom.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build aix || darwin || dragonfly || freebsd || nacl || netbsd || openbsd || solaris || linux // +build aix darwin dragonfly freebsd nacl netbsd openbsd solaris linux package rand @@ -44,14 +45,14 @@ func (r *urandomReader) ReadContext(ctx context.Context, b []byte) (int, error) for { n, err := unix.Read(r.fd, b) if err == nil { - return n, err + return n, nil } select { case <-ctx.Done(): return 0, ctx.Err() default: - if err != nil && err != syscall.EAGAIN && err != syscall.EINTR { + if err != syscall.EAGAIN && err != syscall.EINTR { return n, err } } diff --git a/vendor/github.com/u-root/uio/ubinary/big_endian.go b/vendor/github.com/u-root/uio/ubinary/big_endian.go deleted file mode 100644 index 8a1f94409..000000000 --- a/vendor/github.com/u-root/uio/ubinary/big_endian.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build mips mips64 ppc64 s390x - -package ubinary - -import ( - "encoding/binary" -) - -// NativeEndian is $GOARCH's implementation of byte order. -var NativeEndian = binary.BigEndian diff --git a/vendor/github.com/u-root/uio/ubinary/doc.go b/vendor/github.com/u-root/uio/ubinary/doc.go deleted file mode 100644 index 5d5c5b7be..000000000 --- a/vendor/github.com/u-root/uio/ubinary/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2018 the u-root Authors. All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ubinary provides a native endian binary.ByteOrder. -package ubinary diff --git a/vendor/github.com/u-root/uio/ubinary/little_endian.go b/vendor/github.com/u-root/uio/ubinary/little_endian.go deleted file mode 100644 index 317bb91ae..000000000 --- a/vendor/github.com/u-root/uio/ubinary/little_endian.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 the u-root Authors. 
All rights reserved -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 arm arm64 mipsle mips64le ppc64le riscv riscv64 - -package ubinary - -import ( - "encoding/binary" -) - -// NativeEndian is $GOARCH's implementation of byte order. -var NativeEndian = binary.LittleEndian diff --git a/vendor/github.com/u-root/uio/uio/archivereader.go b/vendor/github.com/u-root/uio/uio/archivereader.go new file mode 100644 index 000000000..4a3a9fc06 --- /dev/null +++ b/vendor/github.com/u-root/uio/uio/archivereader.go @@ -0,0 +1,85 @@ +// Copyright 2021 the u-root Authors. All rights reserved +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uio + +import ( + "bytes" + "errors" + "io" + + "github.com/pierrec/lz4/v4" +) + +const ( + // preReadSizeBytes is the num of bytes pre-read from a io.Reader that will + // be used to match against archive header. + defaultArchivePreReadSizeBytes = 1024 +) + +var ErrPreReadError = errors.New("pre-read nothing") + +// ArchiveReader reads from a io.Reader, decompresses source bytes +// when applicable. +// +// It allows probing for multiple archive format, while still able +// to read from beginning, by pre-reading a small number of bytes. +// +// Always use newArchiveReader to initialize. +type ArchiveReader struct { + // src is where we read source bytes. + src io.Reader + // buf stores pre-read bytes from original io.Reader. Archive format + // detection will be done against it. + buf []byte + + // preReadSizeBytes is how many bytes we pre-read for magic number + // matching for each archive type. This should be greater than or + // equal to the largest header frame size of each supported archive + // format. + preReadSizeBytes int +} + +func NewArchiveReader(r io.Reader) (ArchiveReader, error) { + ar := ArchiveReader{ + src: r, + // Randomly chosen, should be enough for most types: + // + // e.g. 
gzip with 10 byte header, lz4 with a header size + // between 7 and 19 bytes. + preReadSizeBytes: defaultArchivePreReadSizeBytes, + } + pbuf := make([]byte, ar.preReadSizeBytes) + + nr, err := io.ReadFull(r, pbuf) + // In case the image is smaller pre-read block size, 1kb for now. + // Ever possible ? probably not in case a compression is needed! + ar.buf = pbuf[:nr] + if err == io.EOF { + // If we could not pre-read anything, we can't determine if + // it is a compressed file. + ar.src = io.MultiReader(bytes.NewReader(pbuf[:nr]), r) + return ar, ErrPreReadError + } + + // Try each supported compression type, return upon first match. + + // Try lz4. + // magic number error will be thrown if source is not a lz4 archive. + // e.g. "lz4: bad magic number". + if ok, err := lz4.ValidFrameHeader(ar.buf); err == nil && ok { + ar.src = lz4.NewReader(io.MultiReader(bytes.NewReader(ar.buf), r)) + return ar, nil + } + + // Try other archive types here, gzip, xz, etc when needed. + + // Last resort, read as is. + ar.src = io.MultiReader(bytes.NewReader(ar.buf), r) + return ar, nil +} + +func (ar ArchiveReader) Read(p []byte) (n int, err error) { + return ar.src.Read(p) +} diff --git a/vendor/github.com/u-root/uio/uio/buffer.go b/vendor/github.com/u-root/uio/uio/buffer.go index 506e741e8..158a4d1f1 100644 --- a/vendor/github.com/u-root/uio/uio/buffer.go +++ b/vendor/github.com/u-root/uio/uio/buffer.go @@ -6,9 +6,10 @@ package uio import ( "encoding/binary" + "errors" "fmt" - "github.com/u-root/uio/ubinary" + "github.com/josharian/native" ) // Marshaler is the interface implemented by an object that can marshal itself @@ -93,11 +94,15 @@ func (b *Buffer) WriteN(n int) []byte { return b.data[len(b.data)-n:] } +// ErrBufferTooShort is returned when a caller wants to read more bytes than +// are available in the buffer. +var ErrBufferTooShort = errors.New("buffer too short") + // ReadN consumes n bytes from the Buffer. It returns nil, false if there // aren't enough bytes left. 
func (b *Buffer) ReadN(n int) ([]byte, error) { if !b.Has(n) { - return nil, fmt.Errorf("buffer too short at position %d: have %d bytes, want %d bytes", b.byteCount, b.Len(), n) + return nil, fmt.Errorf("%w at position %d: have %d bytes, want %d bytes", ErrBufferTooShort, b.byteCount, b.Len(), n) } rval := b.data[:n] b.data = b.data[n:] @@ -129,12 +134,12 @@ func (b *Buffer) Cap() int { // // Use: // -// func (s *something) Unmarshal(l *Lexer) { -// s.Foo = l.Read8() -// s.Bar = l.Read8() -// s.Baz = l.Read16() -// return l.Error() -// } +// func (s *something) Unmarshal(l *Lexer) { +// s.Foo = l.Read8() +// s.Bar = l.Read8() +// s.Baz = l.Read16() +// return l.Error() +// } type Lexer struct { *Buffer @@ -173,11 +178,14 @@ func NewBigEndianBuffer(b []byte) *Lexer { func NewNativeEndianBuffer(b []byte) *Lexer { return &Lexer{ Buffer: NewBuffer(b), - order: ubinary.NativeEndian, + order: native.Endian, } } -func (l *Lexer) setError(err error) { +// SetError sets the error if no error has previously been set. +// +// The error can later be retried with Error or FinError methods. +func (l *Lexer) SetError(err error) { if l.err == nil { l.err = err } @@ -189,7 +197,7 @@ func (l *Lexer) setError(err error) { func (l *Lexer) Consume(n int) []byte { v, err := l.Buffer.ReadN(n) if err != nil { - l.setError(err) + l.SetError(err) return nil } return v @@ -204,6 +212,9 @@ func (l *Lexer) Error() error { return l.err } +// ErrUnreadBytes is returned when there is more data left to read in the buffer. +var ErrUnreadBytes = errors.New("buffer contains unread bytes") + // FinError returns an error if an error occurred or if there is more data left // to read in the buffer. 
func (l *Lexer) FinError() error { @@ -211,7 +222,7 @@ func (l *Lexer) FinError() error { return l.err } if l.Buffer.Len() > 0 { - return fmt.Errorf("buffer contains more bytes than it should") + return ErrUnreadBytes } return nil } @@ -224,7 +235,7 @@ func (l *Lexer) Read8() uint8 { if v == nil { return 0 } - return uint8(v[0]) + return v[0] } // Read16 reads a 16-bit value from the Buffer. @@ -303,7 +314,7 @@ func (l *Lexer) Read(p []byte) (int, error) { // // If an error occurred, Error() will return a non-nil error. func (l *Lexer) ReadData(data interface{}) { - l.setError(binary.Read(l, l.order, data)) + l.SetError(binary.Read(l, l.order, data)) } // WriteData writes a binary representation of data to the buffer. @@ -312,14 +323,14 @@ func (l *Lexer) ReadData(data interface{}) { // // If an error occurred, Error() will return a non-nil error. func (l *Lexer) WriteData(data interface{}) { - l.setError(binary.Write(l, l.order, data)) + l.SetError(binary.Write(l, l.order, data)) } // Write8 writes a byte to the Buffer. // // If an error occurred, Error() will return a non-nil error. func (l *Lexer) Write8(v uint8) { - l.append(1)[0] = byte(v) + l.append(1)[0] = v } // Write16 writes a 16-bit value to the Buffer. 
diff --git a/vendor/github.com/u-root/uio/uio/null.go b/vendor/github.com/u-root/uio/uio/null.go index 64156f4c0..7f3caebb4 100644 --- a/vendor/github.com/u-root/uio/uio/null.go +++ b/vendor/github.com/u-root/uio/uio/null.go @@ -40,7 +40,7 @@ var blackHolePool = sync.Pool{ func (devNull) ReadFrom(r io.Reader) (n int64, err error) { bufp := blackHolePool.Get().(*[]byte) - readSize := 0 + var readSize int for { readSize, err = r.Read(*bufp) n += int64(readSize) diff --git a/vendor/github.com/u-root/uio/uio/progress.go b/vendor/github.com/u-root/uio/uio/progress.go index 606b1eabe..e2b595eed 100644 --- a/vendor/github.com/u-root/uio/uio/progress.go +++ b/vendor/github.com/u-root/uio/uio/progress.go @@ -26,11 +26,11 @@ type ProgressReadCloser struct { func (rc *ProgressReadCloser) Read(p []byte) (n int, err error) { defer func() { numSymbols := (rc.counter%rc.Interval + n) / rc.Interval - rc.W.Write([]byte(strings.Repeat(rc.Symbol, numSymbols))) + _, _ = rc.W.Write([]byte(strings.Repeat(rc.Symbol, numSymbols))) rc.counter += n rc.written = (rc.written || numSymbols > 0) if err == io.EOF && rc.written { - rc.W.Write([]byte("\n")) + _, _ = rc.W.Write([]byte("\n")) } }() return rc.RC.Read(p) diff --git a/vendor/github.com/u-root/uio/uio/reader.go b/vendor/github.com/u-root/uio/uio/reader.go index a32d66592..0ca839a07 100644 --- a/vendor/github.com/u-root/uio/uio/reader.go +++ b/vendor/github.com/u-root/uio/uio/reader.go @@ -7,8 +7,8 @@ package uio import ( "bytes" "io" - "io/ioutil" "math" + "os" "reflect" ) @@ -26,7 +26,7 @@ func ReadAll(r io.ReaderAt) ([]byte, error) { if imra, ok := r.(inMemReaderAt); ok { return imra.Bytes(), nil } - return ioutil.ReadAll(Reader(r)) + return io.ReadAll(Reader(r)) } // Reader generates a Reader from a ReaderAt. @@ -46,3 +46,22 @@ func ReaderAtEqual(r1, r2 io.ReaderAt) bool { } return bytes.Equal(c, d) && reflect.DeepEqual(r1err, r2err) } + +// ReadIntoFile reads all from io.Reader into the file at given path. 
+// +// If the file at given path does not exist, a new file will be created. +// If the file exists at the given path, but not empty, it will be truncated. +func ReadIntoFile(r io.Reader, p string) error { + f, err := os.OpenFile(p, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(f, r) + if err != nil { + return err + } + + return f.Close() +} diff --git a/vendor/github.com/u-root/uio/ulog/log.go b/vendor/github.com/u-root/uio/ulog/log.go new file mode 100644 index 000000000..fbf00f720 --- /dev/null +++ b/vendor/github.com/u-root/uio/ulog/log.go @@ -0,0 +1,31 @@ +// Copyright 2019 the u-root Authors. All rights reserved +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ulog exposes logging via a Go interface. +// +// ulog has three implementations of the Logger interface: a Go standard +// library "log" package Logger and a test Logger that logs via a test's +// testing.TB.Logf. To use the test logger import "ulog/ulogtest". +package ulog + +import "log" + +// Logger is a log receptacle. +// +// It puts your information somewhere for safekeeping. +type Logger interface { + Printf(format string, v ...interface{}) + Print(v ...interface{}) +} + +// Log is a Logger that prints to the log package's default logger. +var Log Logger = log.Default() + +type emptyLogger struct{} + +func (emptyLogger) Printf(format string, v ...interface{}) {} +func (emptyLogger) Print(v ...interface{}) {} + +// Null is a logger that prints nothing. 
+var Null Logger = emptyLogger{} diff --git a/vendor/github.com/vishvananda/netns/.golangci.yml b/vendor/github.com/vishvananda/netns/.golangci.yml new file mode 100644 index 000000000..600bef78e --- /dev/null +++ b/vendor/github.com/vishvananda/netns/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/vendor/github.com/vishvananda/netns/README.md b/vendor/github.com/vishvananda/netns/README.md index 1fdb2d3e4..bdfedbe81 100644 --- a/vendor/github.com/vishvananda/netns/README.md +++ b/vendor/github.com/vishvananda/netns/README.md @@ -23,6 +23,7 @@ import ( "fmt" "net" "runtime" + "github.com/vishvananda/netns" ) @@ -48,14 +49,3 @@ func main() { } ``` - -## NOTE - -The library can be safely used only with Go >= 1.10 due to [golang/go#20676](https://github.com/golang/go/issues/20676). - -After locking a goroutine to its current OS thread with `runtime.LockOSThread()` -and changing its network namespace, any new subsequent goroutine won't be -scheduled on that thread while it's locked. Therefore, the new goroutine -will run in a different namespace leading to unexpected results. - -See [here](https://www.weave.works/blog/linux-namespaces-golang-followup) for more details. diff --git a/vendor/github.com/vishvananda/netns/doc.go b/vendor/github.com/vishvananda/netns/doc.go new file mode 100644 index 000000000..cd4093a4d --- /dev/null +++ b/vendor/github.com/vishvananda/netns/doc.go @@ -0,0 +1,9 @@ +// Package netns allows ultra-simple network namespace handling. NsHandles +// can be retrieved and set. Note that the current namespace is thread +// local so actions that set and reset namespaces should use LockOSThread +// to make sure the namespace doesn't change due to a goroutine switch. +// It is best to close NsHandles when you are done with them. This can be +// accomplished via a `defer ns.Close()` on the handle. Changing namespaces +// requires elevated privileges, so in most cases this code needs to be run +// as root. 
+package netns diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go index 6be5c55c1..2ed7c7e2f 100644 --- a/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/vendor/github.com/vishvananda/netns/netns_linux.go @@ -1,34 +1,31 @@ -//go:build linux && go1.10 -// +build linux,go1.10 - package netns import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" - "syscall" "golang.org/x/sys/unix" ) -// Deprecated: use syscall pkg instead (go >= 1.5 needed). +// Deprecated: use golang.org/x/sys/unix pkg instead. const ( - CLONE_NEWUTS = 0x04000000 /* New utsname group? */ - CLONE_NEWIPC = 0x08000000 /* New ipcs */ - CLONE_NEWUSER = 0x10000000 /* New user namespace */ - CLONE_NEWPID = 0x20000000 /* New pid namespace */ - CLONE_NEWNET = 0x40000000 /* New network namespace */ - CLONE_IO = 0x80000000 /* Get io context */ - bindMountPath = "/run/netns" /* Bind mount path for named netns */ + CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */ + CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */ + CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */ + CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */ + CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */ + CLONE_IO = unix.CLONE_IO /* Get io context */ ) -// Setns sets namespace using syscall. Note that this should be a method -// in syscall but it has not been added. +const bindMountPath = "/run/netns" /* Bind mount path for named netns */ + +// Setns sets namespace using golang.org/x/sys/unix.Setns. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. func Setns(ns NsHandle, nstype int) (err error) { return unix.Setns(int(ns), nstype) } @@ -36,19 +33,20 @@ func Setns(ns NsHandle, nstype int) (err error) { // Set sets the current network namespace to the namespace represented // by NsHandle. 
func Set(ns NsHandle) (err error) { - return Setns(ns, CLONE_NEWNET) + return unix.Setns(int(ns), unix.CLONE_NEWNET) } // New creates a new network namespace, sets it as current and returns // a handle to it. func New() (ns NsHandle, err error) { - if err := unix.Unshare(CLONE_NEWNET); err != nil { + if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { return -1, err } return Get() } -// NewNamed creates a new named network namespace and returns a handle to it +// NewNamed creates a new named network namespace, sets it as current, +// and returns a handle to it func NewNamed(name string) (NsHandle, error) { if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { err = os.MkdirAll(bindMountPath, 0755) @@ -66,13 +64,15 @@ func NewNamed(name string) (NsHandle, error) { f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) if err != nil { + newNs.Close() return None(), err } f.Close() - nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()) - err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "") + nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "") if err != nil { + newNs.Close() return None(), err } @@ -83,7 +83,7 @@ func NewNamed(name string) (NsHandle, error) { func DeleteNamed(name string) error { namedPath := path.Join(bindMountPath, name) - err := syscall.Unmount(namedPath, syscall.MNT_DETACH) + err := unix.Unmount(namedPath, unix.MNT_DETACH) if err != nil { return err } @@ -109,7 +109,7 @@ func GetFromPath(path string) (NsHandle, error) { // GetFromName gets a handle to a named network namespace such as one // created by `ip netns add`. func GetFromName(name string) (NsHandle, error) { - return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name)) + return GetFromPath(filepath.Join(bindMountPath, name)) } // GetFromPid gets a handle to the network namespace of a given pid. 
@@ -135,7 +135,7 @@ func GetFromDocker(id string) (NsHandle, error) { // borrowed from docker/utils/utils.go func findCgroupMountpoint(cgroupType string) (int, string, error) { - output, err := ioutil.ReadFile("/proc/mounts") + output, err := os.ReadFile("/proc/mounts") if err != nil { return -1, "", err } @@ -165,7 +165,7 @@ func findCgroupMountpoint(cgroupType string) (int, string, error) { // borrowed from docker/utils/utils.go // modified to get the docker pid instead of using /proc/self func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { - dockerpid, err := ioutil.ReadFile("/var/run/docker.pid") + dockerpid, err := os.ReadFile("/var/run/docker.pid") if err != nil { return "", err } @@ -177,7 +177,7 @@ func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { if err != nil { return "", err } - output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) if err != nil { return "", err } @@ -264,7 +264,7 @@ func getPidForContainer(id string) (int, error) { return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1]) } - output, err := ioutil.ReadFile(filename) + output, err := os.ReadFile(filename) if err != nil { return pid, err } diff --git a/vendor/github.com/vishvananda/netns/netns_unspecified.go b/vendor/github.com/vishvananda/netns/netns_others.go similarity index 63% rename from vendor/github.com/vishvananda/netns/netns_unspecified.go rename to vendor/github.com/vishvananda/netns/netns_others.go index d06af62b6..048983774 100644 --- a/vendor/github.com/vishvananda/netns/netns_unspecified.go +++ b/vendor/github.com/vishvananda/netns/netns_others.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netns @@ -10,6 +11,14 @@ var ( ErrNotImplemented = errors.New("not implemented") ) +// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It +// is not implemented on other platforms. 
+// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. +func Setns(ns NsHandle, nstype int) (err error) { + return ErrNotImplemented +} + func Set(ns NsHandle) (err error) { return ErrNotImplemented } @@ -18,6 +27,14 @@ func New() (ns NsHandle, err error) { return -1, ErrNotImplemented } +func NewNamed(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func DeleteNamed(name string) error { + return ErrNotImplemented +} + func Get() (NsHandle, error) { return -1, ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netns/netns.go b/vendor/github.com/vishvananda/netns/nshandle_linux.go similarity index 75% rename from vendor/github.com/vishvananda/netns/netns.go rename to vendor/github.com/vishvananda/netns/nshandle_linux.go index 116befd54..1baffb66a 100644 --- a/vendor/github.com/vishvananda/netns/netns.go +++ b/vendor/github.com/vishvananda/netns/nshandle_linux.go @@ -1,11 +1,3 @@ -// Package netns allows ultra-simple network namespace handling. NsHandles -// can be retrieved and set. Note that the current namespace is thread -// local so actions that set and reset namespaces should use LockOSThread -// to make sure the namespace doesn't change due to a goroutine switch. -// It is best to close NsHandles when you are done with them. This can be -// accomplished via a `defer ns.Close()` on the handle. Changing namespaces -// requires elevated privileges, so in most cases this code needs to be run -// as root. package netns import ( @@ -38,7 +30,7 @@ func (ns NsHandle) Equal(other NsHandle) bool { // String shows the file descriptor number and its dev and inode. 
func (ns NsHandle) String() string { if ns == -1 { - return "NS(None)" + return "NS(none)" } var s unix.Stat_t if err := unix.Fstat(int(ns), &s); err != nil { @@ -71,7 +63,7 @@ func (ns *NsHandle) Close() error { if err := unix.Close(int(*ns)); err != nil { return err } - (*ns) = -1 + *ns = -1 return nil } diff --git a/vendor/github.com/vishvananda/netns/nshandle_others.go b/vendor/github.com/vishvananda/netns/nshandle_others.go new file mode 100644 index 000000000..af727bc09 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/nshandle_others.go @@ -0,0 +1,45 @@ +//go:build !linux +// +build !linux + +package netns + +// NsHandle is a handle to a network namespace. It can only be used on Linux, +// but provides stub methods on other platforms. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. It is only implemented on Linux. +func (ns NsHandle) Equal(_ NsHandle) bool { + return false +} + +// String shows the file descriptor number and its dev and inode. +// It is only implemented on Linux, and returns "NS(none)" on other +// platforms. +func (ns NsHandle) String() string { + return "NS(none)" +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. It is only implemented on Linux, +// and returns "NS(none)" on other platforms. +func (ns NsHandle) UniqueId() string { + return "NS(none)" +} + +// IsOpen returns true if Close() has not been called. It is only implemented +// on Linux and always returns false on other platforms. +func (ns NsHandle) IsOpen() bool { + return false +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is only implemented on Linux. +func (ns *NsHandle) Close() error { + return nil +} + +// None gets an empty (closed) NsHandle. 
+func None() NsHandle { + return NsHandle(-1) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f2dd9b9fd..2410dd484 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -41,13 +41,25 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gopacket github.com/google/gopacket/layers -# github.com/insomniacslk/dhcp v0.0.0-20220504074936-1ca156eafb9f -## explicit; go 1.13 +# github.com/hugelgupf/p9 v0.3.1-0.20230822151754-54f5c5530921 +## explicit; go 1.20 +github.com/hugelgupf/p9/fsimpl/localfs +github.com/hugelgupf/p9/fsimpl/templatefs +github.com/hugelgupf/p9/fsimpl/xattr +github.com/hugelgupf/p9/internal +github.com/hugelgupf/p9/linux +github.com/hugelgupf/p9/p9 +github.com/hugelgupf/p9/vecnet +# github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c +## explicit; go 1.18 github.com/insomniacslk/dhcp/dhcpv4 github.com/insomniacslk/dhcp/dhcpv4/server4 github.com/insomniacslk/dhcp/iana github.com/insomniacslk/dhcp/interfaces github.com/insomniacslk/dhcp/rfc1035label +# github.com/josharian/native v1.1.0 +## explicit; go 1.13 +github.com/josharian/native # github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 ## explicit; go 1.17 github.com/linuxkit/virtsock/pkg/hvsock @@ -104,6 +116,13 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest +# github.com/pierrec/lz4/v4 v4.1.18 +## explicit; go 1.14 +github.com/pierrec/lz4/v4 +github.com/pierrec/lz4/v4/internal/lz4block +github.com/pierrec/lz4/v4/internal/lz4errors +github.com/pierrec/lz4/v4/internal/lz4stream +github.com/pierrec/lz4/v4/internal/xxh32 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -122,17 +141,17 @@ github.com/songgao/water # github.com/stretchr/testify v1.8.4 ## explicit; go 1.20 github.com/stretchr/testify/assert -# github.com/u-root/uio v0.0.0-20210528114334-82958018845c -## explicit; go 1.15 +# github.com/u-root/uio 
v0.0.0-20230305220412-3e8cd9d6bf63 +## explicit; go 1.16 github.com/u-root/uio/rand -github.com/u-root/uio/ubinary github.com/u-root/uio/uio +github.com/u-root/uio/ulog # github.com/vishvananda/netlink v1.2.1-beta.2 ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl -# github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 -## explicit; go 1.12 +# github.com/vishvananda/netns v0.0.4 +## explicit; go 1.17 github.com/vishvananda/netns # golang.org/x/crypto v0.13.0 ## explicit; go 1.17