diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..0ea9093c
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: weekly
+      day: sunday
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 0289380d..ed7fbc81 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -48,5 +48,4 @@ jobs:
         if: matrix.os == 'ubuntu-latest'
         run: make test-race
       - name: Test
-        if: matrix.os != 'windows-latest'
         run: make test
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
index 124b418c..4ef15558 100644
--- a/.goreleaser.yaml
+++ b/.goreleaser.yaml
@@ -148,9 +148,12 @@ builds:
       - amd64
 
 archives:
-  - id: binary
+  - id: binary-version
     name_template: '{{ .Binary }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
     format: binary
+  - id: binary
+    name_template: '{{ .Binary }}_{{ .Os }}_{{ .Arch }}'
+    format: binary
   - id: zip
     name_template: '{{ .Binary }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
     format: zip
diff --git a/Dockerfile b/Dockerfile
index 3e2e9255..6d124dbf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,16 +4,18 @@ WORKDIR /usr/fairos
 COPY go.mod go.sum /usr/fairos/
 RUN go mod download
 COPY . /usr/fairos/
-RUN apk add --update --no-cache make gcc git musl-dev libc-dev linux-headers bash #skipcq: DOK-DL3018
-RUN make binary
+
+#skipcq: DOK-DL3018
+RUN apk add --update --no-cache make gcc git musl-dev libc-dev linux-headers bash \
+    && make binary
 
 FROM alpine:3.15
 ARG CONFIG
 ENV CONFIG=$CONFIG
-RUN addgroup -g 10000 fds
-RUN adduser -u 10000 -G fds -h /home/fds -D fds
+RUN addgroup -g 10000 fds \
+    && adduser -u 10000 -G fds -h /home/fds -D fds
 USER fds
 RUN if [ -n "$CONFIG" ]; then echo -e "$CONFIG" > ~/.dfs.yaml; fi
 EXPOSE 9090
diff --git a/Makefile b/Makefile
index 7f3a05d7..99ac2503 100644
--- a/Makefile
+++ b/Makefile
@@ -29,6 +29,11 @@ lint: linter
 linter:
 	test -f $(GOLANGCI_LINT) || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$($(GO) env GOPATH)/bin $(GOLANGCI_LINT_VERSION)
 
+.PHONY: swagger
+swagger:
+	which swag || ( echo "install swag for your system from https://github.com/swaggo/swag" && exit 1)
+	swag init -g ./cmd/server.go -d cmd/dfs,pkg/api,cmd/common,pkg/dir,pkg/file,pkg/pod,pkg/user,pkg/collection -o ./swagger
+
 .PHONY: vet
 vet:
 	$(GO) vet ./...
@@ -51,7 +56,7 @@ githooks:
 
 .PHONY: protobuftools
 protobuftools:
-	which protoc || ( echo "install protoc for your system from https://github.com/protocolbuffers/protobuf/releases" && exit 1)
+	which protoc || ( echo "install protoc for your system from https://github.com/protocolbuffers/protobuf/releases" && exit 1)
 	which $(GOGOPROTOBUF) || ( cd /tmp && GO111MODULE=on $(GO) get -u github.com/gogo/protobuf/$(GOGOPROTOBUF)@$(GOGOPROTOBUF_VERSION) )
 
 .PHONY: protobuf
diff --git a/README.md b/README.md
index c2bbdbc1..648249ff 100644
--- a/README.md
+++ b/README.md
@@ -154,7 +154,7 @@ Flags:
       --postageBlockId string   the postage block used to store the data in bee
       --pprofPort string        pprof port (default ":9091")
       --rpc string              rpc endpoint for ens network. xDai for mainnet | Goerli for testnet | local fdp-play rpc endpoint for play
-
+      --swag                    should run swagger-ui
 Global Flags:
       --beeApi string           full bee api endpoint (default "localhost:1633")
       --config string           config file (default "/Users/sabyasachipatra/.dfs.yaml")
@@ -179,3 +179,42 @@
 
 To make binaries for all platforms run this command `./generate-exe.sh`
 
+### Generate swagger docs
+
+#### Install swag
+
+```
+go install github.com/swaggo/swag/cmd/swag@latest
+```
+
+#### Generate
+
+```
+swag init -g cmd/server.go -d cmd/dfs,pkg/api,cmd/common,pkg/dir,pkg/file,pkg/pod,pkg/user,pkg/collection -o ./swagger
+```
+
+### Running swagger-ui
+
+By default, swagger-ui is disabled. To run swagger-ui, run the `server` command with the `--swag` flag
+
+```
+$ dfs server --swag
+```
+
+This runs the dfs server along with swagger-ui, available at `http://localhost:9090/swagger/index.html`, assuming
+the server is running on the default port `9090` on localhost
+
+### Running fairOS on the Goerli testnet and Swarm mainnet
+
+We need to set the `network` configuration in the config file to `testnet`, and the bee configuration should point to a bee running
+on mainnet
+
+```
+network: "testnet"
+bee:
+  bee-api-endpoint: http://localhost:1633 # bee running on mainnet
+  postage-batch-id: 
+  is-gateway-proxy: false
+```
+
+
diff --git a/cmd/common/request.go b/cmd/common/request.go
index fb3a0c30..c89990e3 100644
--- a/cmd/common/request.go
+++ b/cmd/common/request.go
@@ -16,44 +16,71 @@ limitations under the License.
 
 package common
 
-type UserRequest struct {
-	UserName string `json:"user_name,omitempty"`
+type UserSignupRequest struct {
+	UserName string `json:"userName,omitempty"`
 	Password string `json:"password,omitempty"`
-	Address  string `json:"address,omitempty"`
 	Mnemonic string `json:"mnemonic,omitempty"`
 }
 
+type UserLoginRequest struct {
+	UserName string `json:"userName,omitempty"`
+	Password string `json:"password,omitempty"`
+}
+
 type PodRequest struct {
-	PodName       string `json:"pod_name,omitempty"`
+	PodName       string `json:"podName,omitempty"`
 	Password      string `json:"password,omitempty"`
 	Reference     string `json:"reference,omitempty"`
-	SharedPodName string `json:"shared_pod_name,omitempty"`
+	SharedPodName string `json:"sharedPodName,omitempty"`
+}
+
+type PodShareRequest struct {
+	PodName       string `json:"podName,omitempty"`
+	SharedPodName string `json:"sharedPodName,omitempty"`
+}
+
+type PodReceiveRequest struct {
+	PodName       string `json:"podName,omitempty"`
+	Reference     string `json:"sharingRef,omitempty"`
+	SharedPodName string `json:"sharedPodName,omitempty"`
 }
 
 type FileSystemRequest struct {
-	PodName       string `json:"pod_name,omitempty"`
-	DirectoryPath string `json:"dir_path,omitempty"`
-	DirectoryName string `json:"dir_name,omitempty"`
-	FilePath      string `json:"file_path,omitempty"`
-	FileName      string `json:"file_name,omitempty"`
-	Destination   string `json:"dest_user,omitempty"`
+	PodName       string `json:"podName,omitempty"`
+	DirectoryPath string `json:"dirPath,omitempty"`
+	DirectoryName string `json:"dirName,omitempty"`
+	FilePath      string `json:"filePath,omitempty"`
+	FileName      string `json:"fileName,omitempty"`
+	Destination   string `json:"destUser,omitempty"`
+}
+
+type RenameRequest struct {
+	PodName string `json:"podName,omitempty"`
+	OldPath string `json:"oldPath,omitempty"`
+	NewPath string `json:"newPath,omitempty"`
+}
+
+type FileReceiveRequest struct {
+	PodName          string `json:"podName,omitempty"`
+	SharingReference string `json:"sharingRef,omitempty"`
+	DirectoryPath    string `json:"dirPath,omitempty"`
 }
 
 type
KVRequest struct { - PodName string `json:"pod_name,omitempty"` - TableName string `json:"table_name,omitempty"` - IndexType string `json:"index_type,omitempty"` + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + IndexType string `json:"indexType,omitempty"` Key string `json:"key,omitempty"` Value string `json:"value,omitempty"` - StartPrefix string `json:"start_prefix,omitempty"` - EndPrefix string `json:"end_prefix,omitempty"` + StartPrefix string `json:"startPrefix,omitempty"` + EndPrefix string `json:"endPrefix,omitempty"` Limit string `json:"limit,omitempty"` Memory string `json:"memory,omitempty"` } type DocRequest struct { - PodName string `json:"pod_name,omitempty"` - TableName string `json:"table_name,omitempty"` + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` ID string `json:"id,omitempty"` Document string `json:"doc,omitempty"` SimpleIndex string `json:"si,omitempty"` @@ -61,5 +88,5 @@ type DocRequest struct { Expression string `json:"expr,omitempty"` Mutable bool `json:"mutable,omitempty"` Limit string `json:"limit,omitempty"` - FileName string `json:"file_name,omitempty"` + FileName string `json:"fileName,omitempty"` } diff --git a/cmd/common/websocket_request.go b/cmd/common/websocket_request.go index 4193bc72..1bd48cbb 100644 --- a/cmd/common/websocket_request.go +++ b/cmd/common/websocket_request.go @@ -3,16 +3,18 @@ package common import ( "bytes" "encoding/json" - "net/http" ) type Event string var ( UserSignup Event = "/user/signup" + UserSignupV2 Event = "/user/signupV2" UserLogin Event = "/user/login" + UserLoginV2 Event = "/user/loginV2" UserImport Event = "/user/import" UserPresent Event = "/user/present" + UserPresentV2 Event = "/user/presentV2" UserIsLoggedin Event = "/user/isloggedin" UserLogout Event = "/user/logout" UserExport Event = "/user/export" @@ -30,6 +32,7 @@ var ( PodReceiveInfo Event = "/pod/receiveinfo" DirIsPresent Event = "/dir/present" DirMkdir Event = "/dir/mkdir" + DirRename Event = "/dir/rename" DirRmdir Event = "/dir/rmdir" DirLs Event = "/dir/ls" DirStat Event = "/dir/stat" @@ -39,6 +42,7 @@ var ( FileUploadStream Event = "/file/upload/stream" FileShare Event = "/file/share" FileReceive Event = "/file/receive" + FileRename Event = "/file/rename" FileReceiveInfo Event = "/file/receiveinfo" FileDelete Event = "/file/delete" FileStat Event = "/file/stat" @@ -47,6 +51,7 @@ var ( KVOpen Event = "/kv/open" KVDelete Event = "/kv/delete" KVCount Event = "/kv/count" + KVEntryPresent Event = "/kv/entry/present" KVEntryPut Event = "/kv/entry/put" KVEntryGet Event = "/kv/entry/get" KVEntryDelete Event = "/kv/entry/del" @@ -69,70 +74,59 @@ var ( ) type WebsocketRequest struct { + Id string `json:"_id"` Event Event `json:"event"` Params interface{} `json:"params,omitempty"` } type FileRequest struct { - PodName string `json:"pod_name,omitempty"` - TableName string `json:"table_name,omitempty"` - DirPath string `json:"dir_path,omitempty"` - BlockSize string `json:"block_size,omitempty"` - FileName string `json:"file_name,omitempty"` + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + DirPath string `json:"dirPath,omitempty"` + BlockSize string `json:"blockSize,omitempty"` + FileName string `json:"fileName,omitempty"` + ContentLength string `json:"contentLength,omitempty"` + Compression string `json:"compression,omitempty"` + Overwrite bool `json:"overwrite,omitempty"` } type FileDownloadRequest struct { - PodName string 
`json:"pod_name,omitempty"` - Filepath string `json:"file_path,omitempty"` + PodName string `json:"podName,omitempty"` + Filepath string `json:"filePath,omitempty"` } type WebsocketResponse struct { - Event Event `json:"event"` - StatusCode int `json:"code"` - Params interface{} `json:"params,omitempty"` - header http.Header - buf bytes.Buffer + Id string `json:"_id"` + Event Event `json:"event"` + Params interface{} `json:"params,omitempty"` + StatusCode int `json:"code,omitempty"` + buf bytes.Buffer + contentType string } func NewWebsocketResponse() *WebsocketResponse { - return &WebsocketResponse{ - header: map[string][]string{}, - } -} - -func (w *WebsocketResponse) Header() http.Header { - return w.header + return &WebsocketResponse{} } func (w *WebsocketResponse) Write(bytes []byte) (int, error) { - if w.Header().Get("Content-Type") == "application/json; charset=utf-8" || - w.Header().Get("Content-Type") == "application/json" { - body := map[string]interface{}{} - err := json.Unmarshal(bytes, &body) - if err != nil { - return 0, err - } - w.Params = body - return len(bytes), nil - } - if w.Header().Get("Content-Length") != "" || w.Header().Get("Content-Length") != "0" { - return w.buf.Write(bytes) - } - return 0, nil + return w.buf.Write(bytes) } -func (w *WebsocketResponse) WriteHeader(statusCode int) { - w.StatusCode = statusCode +func (w *WebsocketResponse) WriteJson(bytes []byte) (int, error) { + w.contentType = "json" + body := map[string]interface{}{} + err := json.Unmarshal(bytes, &body) + if err != nil { + return 0, err + } + w.Params = body + return len(bytes), nil } func (w *WebsocketResponse) Marshal() []byte { - if w.Header().Get("Content-Type") == "application/json; charset=utf-8" || - w.Header().Get("Content-Type") == "application/json" { + if w.contentType == "json" { data, _ := json.Marshal(w) return data } - if w.Header().Get("Content-Length") != "" { - return w.buf.Bytes() - } - return nil + return w.buf.Bytes() } diff --git a/cmd/dfs-cli/cmd/filesystem.go b/cmd/dfs-cli/cmd/filesystem.go index a66e5e66..d081eb80 100644 --- a/cmd/dfs-cli/cmd/filesystem.go +++ b/cmd/dfs-cli/cmd/filesystem.go @@ -123,7 +123,7 @@ func statFileOrDirectory(podName, statElement string) { fmt.Println("Mo. Time : ", time.Unix(accTime, 0).String()) fmt.Println("Ac. 
Time : ", time.Unix(modTime, 0).String()) for _, b := range resp.Blocks { - blkStr := fmt.Sprintf("%s, 0x%s, %s bytes, %s bytes", b.Name, b.Reference, b.Size, b.CompressedSize) + blkStr := fmt.Sprintf("0x%s, %s bytes, %s bytes", b.Reference, b.Size, b.CompressedSize) fmt.Println(blkStr) } } else { @@ -302,7 +302,6 @@ func fileReceiveInfo(podName, sharingRef string) { fmt.Println("NumberOfBlocks : ", resp.NumberOfBlocks) fmt.Println("ContentType : ", resp.ContentType) fmt.Println("Compression : ", resp.Compression) - fmt.Println("PodName : ", resp.PodName) fmt.Println("Sender : ", resp.Sender) fmt.Println("Receiver : ", resp.Receiver) fmt.Println("SharedTime : ", shTime) diff --git a/cmd/dfs-cli/cmd/user.go b/cmd/dfs-cli/cmd/user.go index 5fb25831..00c3fe30 100644 --- a/cmd/dfs-cli/cmd/user.go +++ b/cmd/dfs-cli/cmd/user.go @@ -30,7 +30,7 @@ import ( func userNew(userName, mnemonic string) { password := getPassword() - newUser := common.UserRequest{ + newUser := common.UserSignupRequest{ UserName: userName, Password: password, Mnemonic: mnemonic, @@ -68,7 +68,7 @@ func userNew(userName, mnemonic string) { func userLogin(userName, apiEndpoint string) { password := getPassword() - loginUser := common.UserRequest{ + loginUser := common.UserSignupRequest{ UserName: userName, Password: password, } @@ -89,7 +89,7 @@ func userLogin(userName, apiEndpoint string) { func deleteUser(apiEndpoint string) { password := getPassword() - delUser := common.UserRequest{ + delUser := common.UserSignupRequest{ Password: password, } jsonData, err := json.Marshal(delUser) @@ -108,7 +108,7 @@ func deleteUser(apiEndpoint string) { func migrateUser() { password := getPassword() - migrateUsr := common.UserRequest{ + migrateUsr := common.UserSignupRequest{ Password: password, } jsonData, err := json.Marshal(migrateUsr) diff --git a/cmd/dfs/cmd/commands_test.go b/cmd/dfs/cmd/commands_test.go index 7af83d9f..fa74f8c5 100644 --- a/cmd/dfs/cmd/commands_test.go +++ b/cmd/dfs/cmd/commands_test.go @@ -3,8 +3,8 @@ package cmd import ( "bytes" "io" - "io/ioutil" "os" + "path/filepath" "testing" ) @@ -32,22 +32,23 @@ func Test_ExecuteCommand(t *testing.T) { }) t.Run("server-postageBlockId-required", func(t *testing.T) { - tempDir, err := ioutil.TempDir("", ".dfs") + tempDir, err := os.MkdirTemp("", ".dfs") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempDir) b := bytes.NewBufferString("") rootCmd.SetOut(b) - rootCmd.SetArgs([]string{"server", "--config", tempDir + string(os.PathSeparator) + ".dfs.yaml", "--dataDir", tempDir + string(os.PathSeparator) + ".fairOS/dfs"}) + rootCmd.SetArgs([]string{"server", "--config", tempDir + string(os.PathSeparator) + ".dfs.yaml", + "--dataDir", tempDir + string(os.PathSeparator) + ".fairOS/dfs"}) err = rootCmd.Execute() - if err.Error() != "postageBlockId is required to run server" { + if err != nil && err.Error() != "postageBlockId is required to run server" { t.Fatal("server should fail") } }) t.Run("server-postageBlockId-invalid", func(t *testing.T) { - tempDir, err := ioutil.TempDir("", ".dfs") + tempDir, err := os.MkdirTemp("", ".dfs") if err != nil { t.Fatal(err) } @@ -55,15 +56,19 @@ func Test_ExecuteCommand(t *testing.T) { b := bytes.NewBufferString("") rootCmd.SetOut(b) - rootCmd.SetArgs([]string{"server", "--postageBlockId", "postageBlockId is required to run serverpostageBlockId is required to run server", "--config", tempDir + string(os.PathSeparator) + ".dfs.yaml", "--dataDir", tempDir + string(os.PathSeparator) + ".fairOS/dfs"}) + rootCmd.SetArgs([]string{"server", 
"--postageBlockId", + "postageBlockId is required to run server, postageBlockId is required to run server", "--config", + filepath.Join(tempDir, ".dfs.yaml"), "--dataDir", + filepath.Join(tempDir, ".fairOS/dfs")}) + err = rootCmd.Execute() - if err.Error() != "postageBlockId is invalid" { + if err != nil && err.Error() != "postageBlockId is invalid" { t.Fatal("server should fail") } }) t.Run("server-rpc-err", func(t *testing.T) { - tempDir, err := ioutil.TempDir("", ".dfs") + tempDir, err := os.MkdirTemp("", ".dfs") if err != nil { t.Fatal(err) } @@ -71,15 +76,19 @@ func Test_ExecuteCommand(t *testing.T) { b := bytes.NewBufferString("") rootCmd.SetOut(b) - rootCmd.SetArgs([]string{"server", "--postageBlockId", "c108266827eb7ba357797de2707bea00446919346b51954f773560b79765d552", "--config", tempDir + string(os.PathSeparator) + ".dfs.yaml", "--dataDir", tempDir + string(os.PathSeparator) + ".fairOS/dfs"}) + rootCmd.SetArgs([]string{"server", "--postageBlockId", + "c108266827eb7ba357797de2707bea00446919346b51954f773560b79765d552", "--config", + filepath.Join(tempDir, ".dfs.yaml"), "--dataDir", + filepath.Join(tempDir, ".fairOS/dfs")}) + err = rootCmd.Execute() - if err.Error() != "rpc endpoint is missing" { + if err != nil && err.Error() != "rpc endpoint is missing" { t.Fatal("server should fail") } }) t.Run("server-ens-err", func(t *testing.T) { - tempDir, err := ioutil.TempDir("", ".dfs") + tempDir, err := os.MkdirTemp("", ".dfs") if err != nil { t.Fatal(err) } @@ -87,15 +96,18 @@ func Test_ExecuteCommand(t *testing.T) { b := bytes.NewBufferString("") rootCmd.SetOut(b) - rootCmd.SetArgs([]string{"server", "--rpc", "http://localhost:1633", "--postageBlockId", "c108266827eb7ba357797de2707bea00446919346b51954f773560b79765d552", "--config", tempDir + string(os.PathSeparator) + ".dfs.yaml", "--dataDir", tempDir + string(os.PathSeparator) + ".fairOS/dfs"}) + rootCmd.SetArgs([]string{"server", "--rpc", "http://localhost:1633", "--postageBlockId", + "c108266827eb7ba357797de2707bea00446919346b51954f773560b79765d552", "--config", + filepath.Join(tempDir, ".dfs.yaml"), "--dataDir", + filepath.Join(tempDir, ".fairOS/dfs")}) err = rootCmd.Execute() - if err.Error() != "ens provider domain is missing" { + if err != nil && err.Error() != "ens provider domain is missing" { t.Fatal("server should fail") } }) t.Run("server-network-err", func(t *testing.T) { - tempDir, err := ioutil.TempDir("", ".dfs") + tempDir, err := os.MkdirTemp("", ".dfs") if err != nil { t.Fatal(err) } @@ -112,12 +124,12 @@ func Test_ExecuteCommand(t *testing.T) { "--postageBlockId", "c108266827eb7ba357797de2707bea00446919346b51954f773560b79765d552", "--config", - tempDir + string(os.PathSeparator) + ".dfs.yaml", + filepath.Join(tempDir, ".dfs.yaml"), "--dataDir", - tempDir + string(os.PathSeparator) + ".fairOS/dfs", + filepath.Join(tempDir, ".fairOS/dfs"), }) err = rootCmd.Execute() - if err.Error() != "could not connect to eth backend" { + if err != nil && err.Error() != "could not connect to eth backend" { t.Fatal("server should fail") } }) diff --git a/cmd/dfs/cmd/server.go b/cmd/dfs/cmd/server.go index 2bbd0d45..85075192 100644 --- a/cmd/dfs/cmd/server.go +++ b/cmd/dfs/cmd/server.go @@ -27,13 +27,17 @@ import ( "github.com/fairdatasociety/fairOS-dfs/pkg/api" "github.com/fairdatasociety/fairOS-dfs/pkg/contracts" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" + _ "github.com/fairdatasociety/fairOS-dfs/swagger" "github.com/gorilla/mux" "github.com/rs/cors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" + httpSwagger 
"github.com/swaggo/http-swagger" ) var ( + pprof bool + swag bool httpPort string pprofPort string cookieDomain string @@ -42,7 +46,14 @@ var ( handler *api.Handler ) -// startCmd represents the start command +// @title FairOS-dfs server +// @version v0.9.0-rc1 +// @description A list of the currently provided Interfaces to interact with FairOS decentralised file system(dfs), implementing user, pod, file system, key value store and document store +// @host http://localhost:9090 +// @contact.name Sabyasachi Patra +// @contact.email sabyasachi@datafund.io +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html var serverCmd = &cobra.Command{ Use: "server", Short: "starts a HTTP server for the dfs", @@ -183,12 +194,17 @@ can consume it.`, return err } handler = hdlr + if pprof { + go startPprofService(logger) + } startHttpService(logger) return nil }, } func init() { + serverCmd.Flags().BoolVar(&pprof, "pprof", false, "should run pprof") + serverCmd.Flags().BoolVar(&swag, "swag", false, "should run swagger-ui") serverCmd.Flags().String("httpPort", defaultDFSHttpPort, "http port") serverCmd.Flags().String("pprofPort", defaultDFSPprofPort, "pprof port") serverCmd.Flags().String("cookieDomain", defaultCookieDomain, "the domain to use in the cookie") @@ -217,6 +233,12 @@ func startHttpService(logger logging.Logger) { return } }) + if swag { + router.PathPrefix("/swagger/").Handler(httpSwagger.Handler( + httpSwagger.URL("http://localhost:9090/swagger/doc.json"), // The url pointing to API definition + )).Methods(http.MethodGet) + } + apiVersion := "v1" // v2 introduces user credentials storage on secondary location and identity storage on ens registry @@ -268,20 +290,20 @@ func startHttpService(logger logging.Logger) { userRouter.HandleFunc("/stat", handler.UserStatHandler).Methods("GET") // pod related handlers - baseRouter.HandleFunc("/pod/receive", handler.PodReceiveHandler).Methods("GET") - baseRouter.HandleFunc("/pod/receiveinfo", handler.PodReceiveInfoHandler).Methods("GET") - podRouter := baseRouter.PathPrefix("/pod/").Subrouter() podRouter.Use(handler.LoginMiddleware) podRouter.HandleFunc("/present", handler.PodPresentHandler).Methods("GET") podRouter.HandleFunc("/new", handler.PodCreateHandler).Methods("POST") podRouter.HandleFunc("/open", handler.PodOpenHandler).Methods("POST") + podRouter.HandleFunc("/open-async", handler.PodOpenAsyncHandler).Methods("POST") podRouter.HandleFunc("/close", handler.PodCloseHandler).Methods("POST") podRouter.HandleFunc("/sync", handler.PodSyncHandler).Methods("POST") podRouter.HandleFunc("/share", handler.PodShareHandler).Methods("POST") podRouter.HandleFunc("/delete", handler.PodDeleteHandler).Methods("DELETE") podRouter.HandleFunc("/ls", handler.PodListHandler).Methods("GET") podRouter.HandleFunc("/stat", handler.PodStatHandler).Methods("GET") + podRouter.HandleFunc("/receive", handler.PodReceiveHandler).Methods("GET") + podRouter.HandleFunc("/receiveinfo", handler.PodReceiveInfoHandler).Methods("GET") // directory related handlers dirRouter := baseRouter.PathPrefix("/dir/").Subrouter() @@ -291,18 +313,20 @@ func startHttpService(logger logging.Logger) { dirRouter.HandleFunc("/ls", handler.DirectoryLsHandler).Methods("GET") dirRouter.HandleFunc("/stat", handler.DirectoryStatHandler).Methods("GET") dirRouter.HandleFunc("/present", handler.DirectoryPresentHandler).Methods("GET") + dirRouter.HandleFunc("/rename", handler.DirectoryRenameHandler).Methods("POST") // file related handlers fileRouter := 
baseRouter.PathPrefix("/file/").Subrouter() fileRouter.Use(handler.LoginMiddleware) - fileRouter.HandleFunc("/download", handler.FileDownloadHandler).Methods("GET") - fileRouter.HandleFunc("/download", handler.FileDownloadHandler).Methods("POST") + fileRouter.HandleFunc("/download", handler.FileDownloadHandlerGet).Methods("GET") + fileRouter.HandleFunc("/download", handler.FileDownloadHandlerPost).Methods("POST") fileRouter.HandleFunc("/upload", handler.FileUploadHandler).Methods("POST") fileRouter.HandleFunc("/share", handler.FileShareHandler).Methods("POST") fileRouter.HandleFunc("/receive", handler.FileReceiveHandler).Methods("GET") fileRouter.HandleFunc("/receiveinfo", handler.FileReceiveInfoHandler).Methods("GET") fileRouter.HandleFunc("/delete", handler.FileDeleteHandler).Methods("DELETE") fileRouter.HandleFunc("/stat", handler.FileStatHandler).Methods("GET") + fileRouter.HandleFunc("/rename", handler.FileRenameHandler).Methods("POST") kvRouter := baseRouter.PathPrefix("/kv/").Subrouter() kvRouter.Use(handler.LoginMiddleware) @@ -312,7 +336,7 @@ func startHttpService(logger logging.Logger) { kvRouter.HandleFunc("/open", handler.KVOpenHandler).Methods("POST") kvRouter.HandleFunc("/count", handler.KVCountHandler).Methods("POST") kvRouter.HandleFunc("/delete", handler.KVDeleteHandler).Methods("DELETE") - kvRouter.HandleFunc("/present", handler.KVPresentHandler).Methods("GET") + kvRouter.HandleFunc("/entry/present", handler.KVPresentHandler).Methods("GET") kvRouter.HandleFunc("/entry/put", handler.KVPutHandler).Methods("POST") kvRouter.HandleFunc("/entry/get", handler.KVGetHandler).Methods("GET") kvRouter.HandleFunc("/entry/get-data", handler.KVGetDataHandler).Methods("GET") @@ -332,9 +356,9 @@ func startHttpService(logger logging.Logger) { docRouter.HandleFunc("/find", handler.DocFindHandler).Methods("GET") docRouter.HandleFunc("/loadjson", handler.DocLoadJsonHandler).Methods("POST") docRouter.HandleFunc("/indexjson", handler.DocIndexJsonHandler).Methods("POST") - docRouter.HandleFunc("/entry/put", handler.DocPutHandler).Methods("POST") - docRouter.HandleFunc("/entry/get", handler.DocGetHandler).Methods("GET") - docRouter.HandleFunc("/entry/del", handler.DocDelHandler).Methods("DELETE") + docRouter.HandleFunc("/entry/put", handler.DocEntryPutHandler).Methods("POST") + docRouter.HandleFunc("/entry/get", handler.DocEntryGetHandler).Methods("GET") + docRouter.HandleFunc("/entry/del", handler.DocEntryDelHandler).Methods("DELETE") var origins []string for _, c := range corsOrigins { @@ -353,16 +377,6 @@ func startHttpService(logger logging.Logger) { // Insert the middleware handler := c.Handler(router) - // starting the pprof server - go func() { - logger.Infof("fairOS-dfs pprof listening on port: %v", pprofPort) - err := http.ListenAndServe("localhost"+pprofPort, nil) - if err != nil { - logger.Errorf("pprof listenAndServe: %v ", err.Error()) - return - } - }() - logger.Infof("fairOS-dfs API server listening on port: %v", httpPort) err := http.ListenAndServe(httpPort, handler) if err != nil { @@ -370,3 +384,12 @@ func startHttpService(logger logging.Logger) { return } } + +func startPprofService(logger logging.Logger) { + logger.Infof("fairOS-dfs pprof listening on port: %v", pprofPort) + err := http.ListenAndServe("localhost"+pprofPort, nil) + if err != nil { + logger.Errorf("pprof listenAndServe: %v ", err.Error()) + return + } +} diff --git a/cmd/dfs/cmd/server_test.go b/cmd/dfs/cmd/server_test.go index 175df14d..1bce7fd9 100644 --- a/cmd/dfs/cmd/server_test.go +++ 
b/cmd/dfs/cmd/server_test.go @@ -6,11 +6,12 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/big" "mime/multipart" "net/http" + "net/url" "os" + "path/filepath" "strconv" "testing" "time" @@ -22,6 +23,7 @@ import ( mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" "github.com/fairdatasociety/fairOS-dfs/pkg/user" + "github.com/gorilla/websocket" "github.com/sirupsen/logrus" ) @@ -43,16 +45,17 @@ func TestApis(t *testing.T) { mockClient := mock.NewMockBeeClient() ens := mock2.NewMockNamespaceManager() logger := logging.New(io.Discard, logrus.ErrorLevel) - dataDir, err := ioutil.TempDir("", "new") + dataDir, err := os.MkdirTemp("", "new") if err != nil { t.Fatal(err) } defer os.RemoveAll(dataDir) users := user.NewUsers(dataDir, mockClient, ens, logger) dfsApi := dfs.NewMockDfsAPI(mockClient, users, logger, dataDir) - handler = api.NewMockHandler(dfsApi, logger) + handler = api.NewMockHandler(dfsApi, logger, []string{"http://localhost:3000"}) httpPort = ":9090" pprofPort = ":9091" + base := "localhost:9090" basev1 := "http://localhost:9090/v1" basev2 := "http://localhost:9090/v2" go startHttpService(logger) @@ -61,7 +64,7 @@ func TestApis(t *testing.T) { <-time.After(time.Second * 10) t.Run("login-fail-test", func(t *testing.T) { c := http.Client{Timeout: time.Duration(1) * time.Minute} - userRequest := &common.UserRequest{ + userRequest := &common.UserSignupRequest{ UserName: randStringRunes(16), Password: randStringRunes(8), } @@ -91,7 +94,7 @@ func TestApis(t *testing.T) { t.Run("signup-login", func(t *testing.T) { c := http.Client{Timeout: time.Duration(1) * time.Minute} - userRequest := &common.UserRequest{ + userRequest := &common.UserSignupRequest{ UserName: randStringRunes(16), Password: randStringRunes(8), } @@ -142,7 +145,7 @@ func TestApis(t *testing.T) { t.Run("signup-login-logout-loggedin", func(t *testing.T) { c := http.Client{Timeout: time.Duration(1) * time.Minute} - userRequest := &common.UserRequest{ + userRequest := &common.UserSignupRequest{ UserName: randStringRunes(16), Password: randStringRunes(8), } @@ -236,18 +239,19 @@ func TestApis(t *testing.T) { } }) - t.Run("signup-login-migrate", func(t *testing.T) { + t.Run("signup-login-pod-dir-file-rename", func(t *testing.T) { c := http.Client{Timeout: time.Duration(1) * time.Minute} - - userRequest := &common.UserRequest{ + userRequest := &common.UserSignupRequest{ UserName: randStringRunes(16), Password: randStringRunes(8), } + userBytes, err := json.Marshal(userRequest) if err != nil { t.Fatal(err) } - signupRequestDataHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.UserSignup)), bytes.NewBuffer(userBytes)) + + signupRequestDataHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev2, string(common.UserSignup)), bytes.NewBuffer(userBytes)) if err != nil { t.Fatal(err) } @@ -263,10 +267,10 @@ func TestApis(t *testing.T) { t.Fatal(err) } if signupRequestResp.StatusCode != http.StatusCreated { - t.Fatal(err) + t.Fatal("Signup failed", signupRequestResp.StatusCode) } - userLoginHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.UserLogin)), bytes.NewBuffer(userBytes)) + userLoginHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev2, string(common.UserLogin)), bytes.NewBuffer(userBytes)) if err != nil { t.Fatal(err) @@ -284,39 +288,463 @@ func TestApis(t *testing.T) { if userLoginResp.StatusCode != http.StatusOK { t.Fatal("user 
should be able to login") } + cookie := userLoginResp.Header["Set-Cookie"] - migrateRequest := &common.UserRequest{Password: userRequest.Password} - migrateRequestData, err := json.Marshal(migrateRequest) + // pod new + podRequest := &common.PodRequest{ + PodName: randStringRunes(16), + Password: userRequest.Password, + } + podBytes, err := json.Marshal(podRequest) if err != nil { t.Fatal(err) } - userMigrateHttpReq, err := http.NewRequest(http.MethodPost, basev2+"/user/migrate", bytes.NewBuffer(migrateRequestData)) + podNewHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.PodNew)), bytes.NewBuffer(podBytes)) if err != nil { t.Fatal(err) } - userMigrateHttpReq.Header.Add("Content-Type", "application/json") - userMigrateHttpReq.Header.Add("Content-Length", strconv.Itoa(len(migrateRequestData))) - userMigrateHttpReq.Header.Set("Cookie", cookie[0]) - useMigrateResp, err := c.Do(userMigrateHttpReq) + podNewHttpReq.Header.Set("Cookie", cookie[0]) + podNewHttpReq.Header.Add("Content-Type", "application/json") + podNewHttpReq.Header.Add("Content-Length", strconv.Itoa(len(podBytes))) + podNewResp, err := c.Do(podNewHttpReq) if err != nil { t.Fatal(err) } - err = useMigrateResp.Body.Close() + err = podNewResp.Body.Close() if err != nil { t.Fatal(err) } - if useMigrateResp.StatusCode != http.StatusOK { - t.Fatal("user should be migrated") + if podNewResp.StatusCode != 201 { + t.Fatal("pod creation failed") + } + entries := []struct { + path string + isDir bool + size int64 + content []byte + }{ + { + path: "/dir1", + isDir: true, + }, + { + path: "/dir2", + isDir: true, + }, + { + path: "/dir3", + isDir: true, + }, + { + path: "/file1", + size: 1024 * 1024, + }, + { + path: "/dir1/file11", + size: 1024 * 512, + }, + { + path: "/dir1/file12", + size: 1024 * 1024, + }, + { + path: "/dir3/file31", + size: 1024 * 1024, + }, + { + path: "/dir3/file32", + size: 1024 * 1024, + }, + { + path: "/dir3/file33", + size: 1024, + }, + { + path: "/dir2/dir4", + isDir: true, + }, + { + path: "/dir2/dir4/dir5", + isDir: true, + }, + { + path: "/dir2/dir4/file241", + size: 5 * 1024 * 1024, + }, + { + path: "/dir2/dir4/dir5/file2451", + size: 10 * 1024 * 1024, + }, + } + + for _, v := range entries { + if v.isDir { + mkdirRqst := common.FileSystemRequest{ + PodName: podRequest.PodName, + DirectoryPath: v.path, + } + mkDirBytes, err := json.Marshal(mkdirRqst) + if err != nil { + t.Fatal(err) + } + mkDirHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.DirMkdir)), bytes.NewBuffer(mkDirBytes)) + if err != nil { + t.Fatal(err) + + } + mkDirHttpReq.Header.Set("Cookie", cookie[0]) + mkDirHttpReq.Header.Add("Content-Type", "application/json") + mkDirHttpReq.Header.Add("Content-Length", strconv.Itoa(len(mkDirBytes))) + mkDirResp, err := c.Do(mkDirHttpReq) + if err != nil { + t.Fatal(err) + } + err = mkDirResp.Body.Close() + if err != nil { + t.Fatal(err) + } + if mkDirResp.StatusCode != 201 { + t.Fatal("mkdir failed") + } + } else { + body := new(bytes.Buffer) + writer := multipart.NewWriter(body) + contentLength := fmt.Sprintf("%d", v.size) + + err = writer.WriteField("podName", podRequest.PodName) + if err != nil { + t.Fatal(err) + } + err = writer.WriteField("contentLength", contentLength) + if err != nil { + t.Fatal(err) + } + err = writer.WriteField("dirPath", filepath.Dir(v.path)) + if err != nil { + t.Fatal(err) + } + err = writer.WriteField("blockSize", "4kb") + if err != nil { + t.Fatal(err) + } + part, err := 
writer.CreateFormFile("files", filepath.Base(v.path)) + if err != nil { + t.Fatal(err) + } + reader := &io.LimitedReader{R: rand.Reader, N: v.size} + _, err = io.Copy(part, reader) + if err != nil { + t.Fatal(err) + } + err = writer.Close() + if err != nil { + t.Fatal(err) + } + + uploadReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.FileUpload)), body) + if err != nil { + t.Fatal(err) + + } + uploadReq.Header.Set("Cookie", cookie[0]) + contentType := fmt.Sprintf("multipart/form-data;boundary=%v", writer.Boundary()) + uploadReq.Header.Add("Content-Type", contentType) + uploadResp, err := c.Do(uploadReq) + if err != nil { + t.Fatal(err) + } + err = uploadResp.Body.Close() + if err != nil { + t.Fatal(err) + } + if uploadResp.StatusCode != 200 { + t.Fatal("upload failed") + } + } + } + + for _, v := range entries { + if v.isDir { + statReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s%s?podName=%s&dirPath=%s", basev1, string(common.DirStat), podRequest.PodName, v.path), http.NoBody) + if err != nil { + t.Fatal(err) + + } + statReq.Header.Set("Cookie", cookie[0]) + statResp, err := c.Do(statReq) + if err != nil { + t.Fatal(err) + } + err = statResp.Body.Close() + if err != nil { + t.Fatal(err) + } + if statResp.StatusCode != 200 { + t.Fatal("dir stat failed") + } + } else { + if v.isDir { + statReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s%s?podName=%s&dirPath=%s", basev1, string(common.FileStat), podRequest.PodName, v.path), http.NoBody) + if err != nil { + t.Fatal(err) + + } + statReq.Header.Set("Cookie", cookie[0]) + statResp, err := c.Do(statReq) + if err != nil { + t.Fatal(err) + } + err = statResp.Body.Close() + if err != nil { + t.Fatal(err) + } + if statResp.StatusCode != 200 { + t.Fatal("file stat failed") + } + } + } + } + // rename file "/dir2/dir4/dir5/file2451" => "/dir2/dir4/dir5/file24511" + renames := []struct { + oldPath string + newPath string + isDir bool + }{ + { + oldPath: "/dir2/dir4/dir5/file2451", + newPath: "/dir2/dir4/dir5/file24511", + isDir: false, + }, + { + oldPath: "/dir2/dir4/dir5/file24511", + newPath: "/file24511", + isDir: false, + }, + { + oldPath: "/dir2", + newPath: "/dir2020", + isDir: true, + }, + { + oldPath: "/dir2020/dir4", + newPath: "/dir2020/dir4040", + isDir: true, + }, { + oldPath: "/dir3/file33", + newPath: "/dir2020/file33", + isDir: false, + }, { + oldPath: "/dir1/file12", + newPath: "/dir2020/dir4040/file12", + isDir: false, + }, + } + for _, v := range renames { + renameReq := common.RenameRequest{ + PodName: podRequest.PodName, + OldPath: v.oldPath, + NewPath: v.newPath, + } + + renameBytes, err := json.Marshal(renameReq) + if err != nil { + t.Fatal(err) + } + url := common.FileRename + if v.isDir { + url = common.DirRename + } + renameHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(url)), bytes.NewBuffer(renameBytes)) + if err != nil { + t.Fatal(err) + + } + renameHttpReq.Header.Set("Cookie", cookie[0]) + renameHttpReq.Header.Add("Content-Type", "application/json") + renameHttpReq.Header.Add("Content-Length", strconv.Itoa(len(renameBytes))) + renameResp, err := c.Do(renameHttpReq) + if err != nil { + t.Fatal(err) + } + err = renameResp.Body.Close() + if err != nil { + t.Fatal(err) + } + if renameResp.StatusCode != 200 { + t.Fatal("rename failed", url) + } + } + + newEntries := []struct { + path string + isDir bool + size int64 + content []byte + }{ + { + path: "/dir1", + isDir: true, + }, + { + path: "/dir2020", + isDir: true, + }, + { + 
path: "/dir3", + isDir: true, + }, + { + path: "/file1", + size: 1024 * 1024, + }, + { + path: "/dir1/file11", + size: 1024 * 512, + }, + { + path: "/dir2020/dir4040/file12", + size: 1024 * 1024, + }, + { + path: "/dir3/file31", + size: 1024 * 1024, + }, + { + path: "/dir3/file32", + size: 1024 * 1024, + }, + { + path: "/dir2020/file33", + size: 1024, + }, + { + path: "/dir2020/dir4040", + isDir: true, + }, + { + path: "/dir2020/dir4040/dir5", + isDir: true, + }, + { + path: "/dir2020/dir4040/file241", + size: 5 * 1024 * 1024, + }, + { + path: "/file24511", + size: 10 * 1024 * 1024, + }, + } + for _, v := range newEntries { + if v.isDir { + statReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s%s?podName=%s&dirPath=%s", basev1, string(common.DirStat), podRequest.PodName, v.path), http.NoBody) + if err != nil { + t.Fatal(err) + + } + statReq.Header.Set("Cookie", cookie[0]) + statResp, err := c.Do(statReq) + if err != nil { + t.Fatal(err) + } + err = statResp.Body.Close() + if err != nil { + t.Fatal(err) + } + if statResp.StatusCode != 200 { + t.Fatal("dir stat failed") + } + } else { + if v.isDir { + statReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s%s?podName=%s&dirPath=%s", basev1, string(common.FileStat), podRequest.PodName, v.path), http.NoBody) + if err != nil { + t.Fatal(err) + + } + statReq.Header.Set("Cookie", cookie[0]) + statResp, err := c.Do(statReq) + if err != nil { + t.Fatal(err) + } + err = statResp.Body.Close() + if err != nil { + t.Fatal(err) + } + if statResp.StatusCode != 200 { + t.Fatal("file stat failed") + } + } + } } }) - t.Run("signup-login-migrate-new-username", func(t *testing.T) { - c := http.Client{Timeout: time.Duration(1) * time.Minute} - - userRequest := &common.UserRequest{ + t.Run("ws test", func(t *testing.T) { + u := url.URL{Scheme: "ws", Host: base, Path: "/ws/v1/"} + header := http.Header{} + header.Set("Origin", "http://localhost:3000") + c, _, err := websocket.DefaultDialer.Dial(u.String(), header) + if err != nil { + t.Fatal("dial:", err) + } + defer c.Close() + + downloadFn := func(cl string) { + mt2, reader, err := c.NextReader() + if mt2 != websocket.BinaryMessage { + t.Fatal("non binary message while download") + } + if err != nil { + t.Fatal("download failed", err) + } + + fo, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("%d", time.Now().Unix())) + if err != nil { + t.Fatal("download failed", err) + } + // close fo on exit and check for its returned error + defer func() { + if err := fo.Close(); err != nil { + t.Fatal("download failed", err) + } + }() + n, err := io.Copy(fo, reader) + if err != nil { + t.Fatal("download failed", err) + } + if fmt.Sprintf("%d", n) == cl { + return + } + } + + go func() { + for { + mt, message, err := c.ReadMessage() + if err != nil { + return + } + if mt == 1 { + res := &common.WebsocketResponse{} + if err := json.Unmarshal(message, res); err != nil { + t.Error("got error ", err) + continue + } + if res.Event == common.FileDownload { + params := res.Params.(map[string]interface{}) + cl := fmt.Sprintf("%v", params["content_length"]) + downloadFn(cl) + continue + } + if res.StatusCode != 200 && res.StatusCode != 201 { + t.Errorf("%s failed: %s\n", res.Event, res.Params) + continue + } + } + } + }() + + userRequest := &common.UserSignupRequest{ UserName: randStringRunes(16), Password: randStringRunes(8), } @@ -326,14 +754,15 @@ func TestApis(t *testing.T) { t.Fatal(err) } - //create v2 user signupRequestDataHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev2, 
string(common.UserSignup)), bytes.NewBuffer(userBytes)) if err != nil { - return + t.Fatal(err) } signupRequestDataHttpReq.Header.Add("Content-Type", "application/json") signupRequestDataHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - signupRequestResp, err := c.Do(signupRequestDataHttpReq) + + httpClient := http.Client{Timeout: time.Duration(1) * time.Minute} + signupRequestResp, err := httpClient.Do(signupRequestDataHttpReq) if err != nil { t.Fatal(err) } @@ -346,405 +775,602 @@ func TestApis(t *testing.T) { t.Fatal("Signup failed", signupRequestResp.StatusCode) } - // create user with same name in v1 to test migration with different name - signupRequestDataHttpReq, err = http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.UserSignup)), bytes.NewBuffer(userBytes)) + // userLogin + podName := fmt.Sprintf("%d", time.Now().UnixNano()) + + login := &common.WebsocketRequest{ + Event: common.UserLoginV2, + Params: userRequest, + } + + data, err := json.Marshal(login) if err != nil { - t.Fatal(err) + t.Fatal("failed to marshal login request: ", err) } - signupRequestDataHttpReq.Header.Add("Content-Type", "application/json") - signupRequestDataHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - signupRequestResp, err = c.Do(signupRequestDataHttpReq) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { - t.Fatal(err) + t.Fatal("write:", err) } - err = signupRequestResp.Body.Close() + // userPresent + uPresent := &common.WebsocketRequest{ + Event: common.UserPresentV2, + Params: common.UserSignupRequest{ + UserName: userRequest.UserName, + }, + } + data, err = json.Marshal(uPresent) if err != nil { t.Fatal(err) } - if signupRequestResp.StatusCode != http.StatusCreated { - t.Fatal("Signup failed", signupRequestResp.StatusCode) - return - } - - userLoginHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.UserLogin)), bytes.NewBuffer(userBytes)) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) + } + // userLoggedIN + uLoggedIn := &common.WebsocketRequest{ + Event: common.UserIsLoggedin, + Params: common.UserSignupRequest{ + UserName: userRequest.UserName, + }, } - userLoginHttpReq.Header.Add("Content-Type", "application/json") - userLoginHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - userLoginResp, err := c.Do(userLoginHttpReq) + data, err = json.Marshal(uLoggedIn) if err != nil { t.Fatal(err) } - err = userLoginResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - if userLoginResp.StatusCode != http.StatusOK { - t.Fatal("user should be able to login") + + // userStat + userStat := &common.WebsocketRequest{ + Event: common.UserStat, } - cookie := userLoginResp.Header["Set-Cookie"] - userStatHttpReq, err := http.NewRequest(http.MethodGet, basev1+"/user/stat", http.NoBody) + data, err = json.Marshal(userStat) if err != nil { t.Fatal(err) - } - userStatHttpReq.Header.Set("Cookie", cookie[0]) - userStatResp, err := c.Do(userStatHttpReq) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - useStateBodyBytes, err := io.ReadAll(userStatResp.Body) + + // podNew + podNew := &common.WebsocketRequest{ + Event: common.PodNew, + Params: common.PodRequest{ + PodName: podName, + Password: userRequest.Password, + }, + } + data, err = json.Marshal(podNew) if err != nil { t.Fatal(err) } - - err = userStatResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) 
if err != nil { t.Fatal(err) } - userStat := &user.Stat{} - err = json.Unmarshal(useStateBodyBytes, userStat) + + // podLs + podLs := &common.WebsocketRequest{ + Event: common.PodLs, + } + data, err = json.Marshal(podLs) if err != nil { t.Fatal(err) } - userMigrateHttpReq, err := http.NewRequest(http.MethodPost, basev2+"/user/migrate", bytes.NewBuffer(userBytes)) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) + } + // mkdir + mkDir := &common.WebsocketRequest{ + Event: common.DirMkdir, + Params: common.FileRequest{ + PodName: podName, + DirPath: "/d", + }, } - userMigrateHttpReq.Header.Add("Content-Type", "application/json") - userMigrateHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - userMigrateHttpReq.Header.Set("Cookie", cookie[0]) - useMigrateResp, err := c.Do(userMigrateHttpReq) + data, err = json.Marshal(mkDir) if err != nil { t.Fatal(err) } - - err = useMigrateResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - if useMigrateResp.StatusCode == http.StatusOK { - t.Fatal("migration should fail as username is already taken") + + // rmDir + rmDir := &common.WebsocketRequest{ + Event: common.DirRmdir, + Params: common.FileRequest{ + PodName: podName, + DirPath: "/d", + }, } - newUsername := randStringRunes(16) - migrateRequest := &common.UserRequest{Password: userRequest.Password, UserName: newUsername} - migrateRequestData, err := json.Marshal(migrateRequest) + data, err = json.Marshal(rmDir) if err != nil { t.Fatal(err) } - userMigrateHttpReq, err = http.NewRequest(http.MethodPost, basev2+"/user/migrate", bytes.NewBuffer(migrateRequestData)) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) + } + // dirLs + dirLs := &common.WebsocketRequest{ + Event: common.DirLs, + Params: common.FileRequest{ + PodName: podName, + DirPath: "/", + }, } - userMigrateHttpReq.Header.Add("Content-Type", "application/json") - userMigrateHttpReq.Header.Add("Content-Length", strconv.Itoa(len(migrateRequestData))) - userMigrateHttpReq.Header.Set("Cookie", cookie[0]) - useMigrateResp, err = c.Do(userMigrateHttpReq) + data, err = json.Marshal(dirLs) if err != nil { t.Fatal(err) } - - err = useMigrateResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - if useMigrateResp.StatusCode != http.StatusOK { - t.Fatal("user should be migrated") - } - userLoginHttpReq, err = http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev2, string(common.UserLogin)), bytes.NewBuffer(migrateRequestData)) + // dirStat + dirStat := &common.WebsocketRequest{ + Event: common.DirStat, + Params: common.FileRequest{ + PodName: podName, + DirPath: "/", + }, + } + data, err = json.Marshal(dirStat) if err != nil { t.Fatal(err) - } - userLoginHttpReq.Header.Add("Content-Type", "application/json") - userLoginHttpReq.Header.Add("Content-Length", strconv.Itoa(len(migrateRequestData))) - userLoginResp, err = c.Do(userLoginHttpReq) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - err = userLoginResp.Body.Close() + + // dirPresent + dirPresent := &common.WebsocketRequest{ + Event: common.DirIsPresent, + Params: common.FileRequest{ + PodName: podName, + DirPath: "/d", + }, + } + data, err = json.Marshal(dirPresent) if err != nil { t.Fatal(err) } - if userLoginResp.StatusCode != http.StatusOK { - t.Fatal("user should be able to login") + err = c.WriteMessage(websocket.TextMessage, data) + if err != nil { + t.Fatal(err) } - cookie 
= userLoginResp.Header["Set-Cookie"] - userStatHttpReq, err = http.NewRequest(http.MethodGet, basev1+"/user/stat", http.NoBody) + // Upload + upload := &common.WebsocketRequest{ + Event: common.FileUpload, + Params: common.FileRequest{ + PodName: podName, + DirPath: "/", + BlockSize: "1Mb", + FileName: "README.md", + }, + } + data, err = json.Marshal(upload) if err != nil { t.Fatal(err) - } - userStatHttpReq.Header.Set("Cookie", cookie[0]) - userStatResp, err = c.Do(userStatHttpReq) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - useStateBodyBytes, err = io.ReadAll(userStatResp.Body) + file, err := os.Open("../../../README.md") + if err != nil { + panic(err) + } + defer file.Close() + body := &bytes.Buffer{} + _, err = io.Copy(body, file) if err != nil { t.Fatal(err) } - err = userStatResp.Body.Close() + err = c.WriteMessage(websocket.BinaryMessage, body.Bytes()) if err != nil { t.Fatal(err) } - userStat2 := &user.Stat{} - err = json.Unmarshal(useStateBodyBytes, userStat2) + // Download + download := &common.WebsocketRequest{ + Event: common.FileDownload, + Params: common.FileDownloadRequest{ + PodName: podName, + Filepath: "/README.md", + }, + } + data, err = json.Marshal(download) if err != nil { t.Fatal(err) } - if userStat.Name == userStat2.Name { - t.Fatal("username should not be same after migration with different username") - } - if userStat.Reference != userStat2.Reference { - t.Fatal("completely different user stats") + err = c.WriteMessage(websocket.TextMessage, data) + if err != nil { + t.Fatal(err) } - }) - - t.Run("signup-login-migrate-with-pod-and-file-upload-download", func(t *testing.T) { - c := http.Client{Timeout: time.Duration(1) * time.Minute} - userRequest := &common.UserRequest{ - UserName: randStringRunes(16), - Password: randStringRunes(8), + // stat + stat := &common.WebsocketRequest{ + Event: common.FileStat, + Params: common.FileSystemRequest{ + PodName: podName, + DirectoryPath: "/README.md", + }, } - - userBytes, err := json.Marshal(userRequest) + data, err = json.Marshal(stat) + if err != nil { + t.Fatal(err) + } + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - // create user v1 - signupRequestDataHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.UserSignup)), bytes.NewBuffer(userBytes)) + table := "kv_1" + // kvCreate + kvCreate := &common.WebsocketRequest{ + Event: common.KVCreate, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + IndexType: "string", + }, + } + data, err = json.Marshal(kvCreate) if err != nil { t.Fatal(err) } - signupRequestDataHttpReq.Header.Add("Content-Type", "application/json") - signupRequestDataHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - signupRequestResp, err := c.Do(signupRequestDataHttpReq) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - err = signupRequestResp.Body.Close() + // kvList + kvList := &common.WebsocketRequest{ + Event: common.KVList, + Params: common.KVRequest{ + PodName: podName, + }, + } + data, err = json.Marshal(kvList) if err != nil { t.Fatal(err) } - if signupRequestResp.StatusCode != http.StatusCreated { - t.Fatal("Signup failed", signupRequestResp.StatusCode) - return - } - - userLoginHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.UserLogin)), bytes.NewBuffer(userBytes)) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) + } + // kvOpen + kvOpen 
:= &common.WebsocketRequest{ + Event: common.KVOpen, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + }, } - userLoginHttpReq.Header.Add("Content-Type", "application/json") - userLoginHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - userLoginResp, err := c.Do(userLoginHttpReq) + data, err = json.Marshal(kvOpen) if err != nil { t.Fatal(err) } - err = userLoginResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - if userLoginResp.StatusCode != http.StatusOK { - t.Fatal("user should be able to login") - } - cookie := userLoginResp.Header["Set-Cookie"] - podReq := &common.PodRequest{ - PodName: randStringRunes(16), - Password: userRequest.Password, + // kvEntryPut + kvEntryPut := &common.WebsocketRequest{ + Event: common.KVEntryPut, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + Key: "key1", + Value: "value1", + }, } - podReqData, err := json.Marshal(podReq) + data, err = json.Marshal(kvEntryPut) if err != nil { t.Fatal(err) } - - podHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.PodNew)), bytes.NewBuffer(podReqData)) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - podHttpReq.Header.Add("Content-Type", "application/json") - podHttpReq.Header.Add("Content-Length", strconv.Itoa(len(podReqData))) - podHttpReq.Header.Set("Cookie", cookie[0]) - podNewResp, err := c.Do(podHttpReq) + + // kvCount + kvCount := &common.WebsocketRequest{ + Event: common.KVCount, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + }, + } + data, err = json.Marshal(kvCount) if err != nil { t.Fatal(err) } - err = podNewResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - if podNewResp.StatusCode != http.StatusCreated { - t.Fatal("pod new failed") - } - uploadBuf := new(bytes.Buffer) - fileName := fmt.Sprintf("file_%d", time.Now().Unix()) - uploadWriter := multipart.NewWriter(uploadBuf) - dataBytes := []byte(fmt.Sprintf("Latest updates %d", time.Now().Unix())) - err = uploadWriter.WriteField("pod_name", podReq.PodName) + // kvGet + kvGet := &common.WebsocketRequest{ + Event: common.KVEntryGet, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + Key: "key1", + }, + } + data, err = json.Marshal(kvGet) if err != nil { - t.Fatal("pod new failed") + t.Fatal(err) } - err = uploadWriter.WriteField("dir_path", "/") + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { - t.Fatal("pod new failed") + t.Fatal(err) } - err = uploadWriter.WriteField("block_size", "1Mb") + + // kvSeek + kvSeek := &common.WebsocketRequest{ + Event: common.KVSeek, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + StartPrefix: "key", + }, + } + data, err = json.Marshal(kvSeek) if err != nil { - t.Fatal("pod new failed") + t.Fatal(err) } - err = uploadWriter.WriteField("content_length", fmt.Sprintf("%d", len(dataBytes))) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { - t.Fatal("pod new failed") + t.Fatal(err) } - uploadPart, err := uploadWriter.CreateFormFile("files", fileName) + + // kvSeek + kvSeekNext := &common.WebsocketRequest{ + Event: common.KVSeekNext, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + }, + } + data, err = json.Marshal(kvSeekNext) if err != nil { t.Fatal(err) } - _, err = io.Copy(uploadPart, bytes.NewReader(dataBytes)) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { 
t.Fatal(err) } - err = uploadWriter.Close() + + // kvEntryDel + kvEntryDel := &common.WebsocketRequest{ + Event: common.KVEntryDelete, + Params: common.KVRequest{ + PodName: podName, + TableName: table, + Key: "key1", + }, + } + data, err = json.Marshal(kvEntryDel) if err != nil { t.Fatal(err) } - contentType := fmt.Sprintf("multipart/form-data;boundary=%v", uploadWriter.Boundary()) - uploadHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.FileUpload)), uploadBuf) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - uploadHttpReq.Header.Set("Content-Type", contentType) - if cookie != nil { - uploadHttpReq.Header.Set("Cookie", cookie[0]) + + docTable := "doc_1" + // docCreate + docCreate := &common.WebsocketRequest{ + Event: common.DocCreate, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + SimpleIndex: "first_name=string,age=number", + Mutable: true, + }, } - uploadResp, err := c.Do(uploadHttpReq) + data, err = json.Marshal(docCreate) if err != nil { t.Fatal(err) } - err = uploadResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - if uploadResp.StatusCode != http.StatusOK { - t.Fatal("upload failed") - } - userMigrateHttpReq, err := http.NewRequest(http.MethodPost, basev2+"/user/migrate", bytes.NewBuffer(userBytes)) + // docLs + docLs := &common.WebsocketRequest{ + Event: common.DocList, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + }, + } + data, err = json.Marshal(docLs) if err != nil { t.Fatal(err) - } - userMigrateHttpReq.Header.Add("Content-Type", "application/json") - userMigrateHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - userMigrateHttpReq.Header.Set("Cookie", cookie[0]) - useMigrateResp, err := c.Do(userMigrateHttpReq) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - err = useMigrateResp.Body.Close() + // docOpen + docOpen := &common.WebsocketRequest{ + Event: common.DocOpen, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + }, + } + data, err = json.Marshal(docOpen) if err != nil { t.Fatal(err) } - if useMigrateResp.StatusCode != http.StatusOK { - t.Fatal("user should be migrated") + err = c.WriteMessage(websocket.TextMessage, data) + if err != nil { + t.Fatal(err) } - userLoginHttpReq, err = http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev2, string(common.UserLogin)), bytes.NewBuffer(userBytes)) + // docEntryPut + docEntryPut := &common.WebsocketRequest{ + Event: common.DocEntryPut, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + Document: `{"id":"1", "first_name": "Hello1", "age": 11}`, + }, + } + data, err = json.Marshal(docEntryPut) + if err != nil { + t.Fatal(err) + } + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) + } + // docEntryGet + docEntryGet := &common.WebsocketRequest{ + Event: common.DocEntryGet, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + ID: "1", + }, } - userLoginHttpReq.Header.Add("Content-Type", "application/json") - userLoginHttpReq.Header.Add("Content-Length", strconv.Itoa(len(userBytes))) - userLoginResp, err = c.Do(userLoginHttpReq) + data, err = json.Marshal(docEntryGet) if err != nil { t.Fatal(err) } - err = userLoginResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) } - if userLoginResp.StatusCode != http.StatusOK { - t.Fatal("user should be able to 
login") + + // docFind + docFind := &common.WebsocketRequest{ + Event: common.DocFind, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + Expression: `age>10`, + }, } - cookie = userLoginResp.Header["Set-Cookie"] - podOpenHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.PodOpen)), bytes.NewBuffer(podReqData)) + data, err = json.Marshal(docFind) if err != nil { t.Fatal(err) - } - podOpenHttpReq.Header.Add("Content-Type", "application/json") - podOpenHttpReq.Header.Add("Content-Length", strconv.Itoa(len(podReqData))) - podOpenHttpReq.Header.Set("Cookie", cookie[0]) - podOpenResp, err := c.Do(podOpenHttpReq) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) + } + // docCount + docCount := &common.WebsocketRequest{ + Event: common.DocCount, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + }, } - err = podOpenResp.Body.Close() + data, err = json.Marshal(docCount) if err != nil { t.Fatal(err) - } - if podOpenResp.StatusCode != http.StatusOK { - t.Fatal("pod open failed") - + err = c.WriteMessage(websocket.TextMessage, data) + if err != nil { + t.Fatal(err) } - downloadBuf := new(bytes.Buffer) - downloadWriter := multipart.NewWriter(downloadBuf) - err = downloadWriter.WriteField("pod_name", podReq.PodName) + // docEntryGet + docEntryDel := &common.WebsocketRequest{ + Event: common.DocEntryDel, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + ID: "1", + }, + } + data, err = json.Marshal(docEntryDel) if err != nil { - t.Fatal("pod new failed") + t.Fatal(err) } - err = downloadWriter.WriteField("file_path", fmt.Sprintf("/%s", fileName)) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { - t.Fatal("pod new failed") + t.Fatal(err) } - err = downloadWriter.Close() + // docDel + docDel := &common.WebsocketRequest{ + Event: common.DocDelete, + Params: common.DocRequest{ + PodName: podName, + TableName: docTable, + }, + } + data, err = json.Marshal(docDel) if err != nil { t.Fatal(err) - } - contentType = fmt.Sprintf("multipart/form-data;boundary=%v", downloadWriter.Boundary()) - downloadHttpReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", basev1, string(common.FileDownload)), downloadBuf) + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) - } - downloadHttpReq.Header.Set("Content-Type", contentType) - if cookie != nil { - downloadHttpReq.Header.Set("Cookie", cookie[0]) + // user Logout + uLogout := &common.WebsocketRequest{ + Event: common.UserLogout, } - downloadResp, err := c.Do(downloadHttpReq) + data, err = json.Marshal(uLogout) if err != nil { t.Fatal(err) - } - err = downloadResp.Body.Close() + err = c.WriteMessage(websocket.TextMessage, data) if err != nil { t.Fatal(err) + } + // userLoggedIN + uLoggedIn = &common.WebsocketRequest{ + Event: common.UserIsLoggedin, + Params: common.UserSignupRequest{ + UserName: userRequest.UserName, + }, } - if downloadResp.StatusCode != http.StatusOK { - t.Fatal("download failed") + data, err = json.Marshal(uLoggedIn) + if err != nil { + t.Fatal(err) + } + err = c.WriteMessage(websocket.TextMessage, data) + if err != nil { + t.Fatal(err) + } + + err = c.WriteMessage(websocket.CloseMessage, []byte{}) + if err != nil { + t.Fatal("write:", err) } + // wait + <-time.After(time.Second) }) } diff --git a/download.sh b/download.sh index 702e28d1..a397c812 100755 --- a/download.sh +++ b/download.sh @@ -23,15 +23,27 @@ dfs_download() { exit 1 fi - eval curl -s 
https://api.github.com/repos/fairDataSociety/fairOS-dfs/releases/latest \
+  if [[ "$2" == "latest" ]] ; then
+    eval curl -s https://api.github.com/repos/fairDataSociety/fairOS-dfs/releases/latest \
 | grep "$1" \
 | cut -d : -f 2,3 \
 | tr -d \" \
 | wget -qi -
-
+  else
+    eval curl -s https://api.github.com/repos/fairDataSociety/fairOS-dfs/releases \
+| grep "$2/$1" \
+| cut -d : -f 2,3 \
+| tr -d \" \
+| wget -qi -
+  fi
 }

 install_dfs() {
+  VERSION="latest"
+  if [[ "$1" != "" ]]; then
+    VERSION="$1"
+  fi
+
   BIN_NAME="dfs-"

   if [[ "$OSTYPE" == "linux-gnu" ]]; then
@@ -51,6 +63,8 @@ install_dfs() {
     exit 1
   fi

+  dfs_echo "$VERSION"
+
   ARCH=$(uname -m)

   echo " /@@@@@@ /@@ /@@@@@@ /@@@@@@ /@@ /@@@@@@"
@@ -65,37 +79,29 @@ install_dfs() {
   echo "========== FairOs-dfs Installation =========="
   echo "Detected OS: $DETECTED_OS"
   echo "Detected Architecture: $ARCH"
-  echo "====================================================="
+  echo "Downloading Version: $VERSION"
+  echo "============================================="

   if [[ "$ARCH" == "arm64" && $DETECTED_OS == "mac" ]]; then
-    BIN_NAME="dfs-darwin-amd64"
-    dfs_echo $BIN_NAME
+    BIN_NAME="dfs_darwin_arm64"
+  elif [[ "$ARCH" == "amd64" && $DETECTED_OS == "mac" ]]; then
+    BIN_NAME="dfs_darwin_amd64"
   elif [[ "$ARCH" == "x86_64" && $DETECTED_OS == "windows" ]]; then
-    BIN_NAME="dfs-windows-amd64.exe"
-    dfs_echo $BIN_NAME
-  elif [[ "$ARCH" == "x86_32" && $DETECTED_OS == "windows" ]]; then
-    BIN_NAME="dfs-windows-386.exe"
-    dfs_echo $BIN_NAME
+    BIN_NAME="dfs_windows_amd64.exe"
   elif [[ "$ARCH" == "arm64" && $DETECTED_OS == "linux" ]]; then
-    BIN_NAME="dfs-linux-arm64.exe"
-    dfs_echo $BIN_NAME
-  elif [[ "$ARCH" == "x86_32" && $DETECTED_OS == "linux" ]]; then
-    BIN_NAME="dfs-linux-386.exe"
-    dfs_echo $BIN_NAME
+    BIN_NAME="dfs_linux_arm64"
   elif [[ "$ARCH" == "x86_64" && $DETECTED_OS == "linux" ]]; then
-    BIN_NAME="dfs-linux-amd64.exe"
-    dfs_echo $BIN_NAME
+    BIN_NAME="dfs_linux_amd64"
   elif [[ "$ARCH" == "amd64" && $DETECTED_OS == "linux" ]]; then
-    BIN_NAME="dfs-linux-amd64.exe"
-    dfs_echo $BIN_NAME
+    BIN_NAME="dfs_linux_amd64"
   else
     dfs_echo "Error: unable to detect architecture. Please install manually by referring to $GH_README"
     exit 1
   fi
-
-  dfs_download $BIN_NAME
+  dfs_echo "Downloading $BIN_NAME"
+  dfs_download $BIN_NAME "$VERSION"
 }

-install_dfs
+install_dfs "$1"
 }
\ No newline at end of file
diff --git a/go.mod b/go.mod
index c565102b..09d6646b 100644
--- a/go.mod
+++ b/go.mod
@@ -19,20 +19,24 @@ require (
 	github.com/klauspost/pgzip v1.2.5
 	github.com/miguelmota/go-ethereum-hdwallet v0.1.1
 	github.com/mitchellh/go-homedir v1.1.0
+	github.com/plexsysio/taskmanager v0.0.0-20211220123746-de5ebdd49ae2
 	github.com/rs/cors v1.7.0
 	github.com/sirupsen/logrus v1.6.0
 	github.com/spf13/cobra v1.0.0
 	github.com/spf13/viper v1.7.0
+	github.com/swaggo/http-swagger v1.3.3
+	github.com/swaggo/swag v1.8.7
 	github.com/tinygrasshopper/bettercsv v0.0.1
 	github.com/tyler-smith/go-bip39 v1.1.0
 	github.com/wealdtech/go-ens/v3 v3.5.3
 	golang.org/x/crypto v0.0.0-20220214200702-86341886e292
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+	golang.org/x/term v0.1.0
 	gopkg.in/yaml.v2 v2.4.0
 	resenje.org/jsonhttp v0.2.0
 )

 require (
+	github.com/KyleBanks/depth v1.2.1 // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
 	github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
 	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce // indirect
@@ -40,15 +44,21 @@ require (
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
 	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.20.0 // indirect
+	github.com/go-openapi/spec v0.20.7 // indirect
+	github.com/go-openapi/swag v0.22.3 // indirect
 	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/ipfs/go-cid v0.1.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
 	github.com/klauspost/compress v1.11.7 // indirect
 	github.com/klauspost/cpuid/v2 v2.0.11 // indirect
 	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
 	github.com/magiconair/properties v1.8.1 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.8 // indirect
 	github.com/mattn/go-isatty v0.0.13 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
@@ -74,15 +84,19 @@ require (
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/subosito/gotenv v1.2.0 // indirect
+	github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect
 	github.com/tklauser/go-sysconf v0.3.9 // indirect
 	github.com/tklauser/numcpus v0.4.0 // indirect
 	github.com/wealdtech/go-multicodec v1.4.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
-	golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	golang.org/x/net v0.1.0 // indirect
 	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
-	golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
-	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/sys v0.2.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/tools v0.2.0 // indirect
 	gopkg.in/ini.v1 v1.57.0 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/blake3 v1.1.7 // indirect
 )
diff --git a/go.sum b/go.sum
index e3ea2c5e..96402345 100644
--- a/go.sum
+++ b/go.sum
@@ -10,32 +10,15 @@
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -45,7 +28,6 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= 
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= @@ -66,36 +48,29 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod 
h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apilayer/freegeoip v3.5.0+incompatible/go.mod h1:CUfFqErhFhXneJendyQ/rRcuA8kH8JxHvYnbOozmlCU= github.com/aristanetworks/fsnotify v1.4.2/go.mod h1:D/rtu7LpjYM8tRJphJ0hUBYpjai8SfX+aSNsWDTq/Ks= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= @@ -106,11 +81,7 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= @@ -120,7 +91,6 @@ github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7 github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -159,9 +129,6 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/c-bata/go-prompt v0.2.3 h1:jjCS+QhG/sULBhAaBdjb2PlMRVaKXQgn+4yzaauvs2s= github.com/c-bata/go-prompt v0.2.3/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/casbin/casbin/v2 v2.35.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= @@ -175,14 +142,10 @@ github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgk github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= @@ -192,15 +155,12 @@ github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= 
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -225,7 +185,6 @@ github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzA github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= -github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= @@ -244,11 +203,9 @@ github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbT github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -258,12 +215,9 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= github.com/elastic/gosigar v0.10.5/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/go-ethereum v1.9.14/go.mod h1:oP8FC5+TbICUyftkTWs+8JryntjIJLJvWvApK3z2AYw= github.com/ethereum/go-ethereum v1.9.20/go.mod h1:JSSTypSMTkGZtAdAChH2wP5dZEvPGh3nUTuDpH+hNrg= @@ -277,9 +231,6 @@ 
github.com/ethersphere/bee v1.7.0 h1:VGn90M7oRBHnU56Vy4coVmgMHn3Bj8GM/ZzDe7jl4V4 github.com/ethersphere/bee v1.7.0/go.mod h1:lHJPrPnWf65zyVbw6ZPwJswnqFSPo9mZ7iiQmdwuRmo= github.com/ethersphere/bmt v0.1.4 h1:+rkWYNtMgDx6bkNqGdWu+U9DgGI1rRZplpSW3YhBr1Q= github.com/ethersphere/bmt v0.1.4/go.mod h1:Yd8ft1U69WDuHevZc/rwPxUv1rzPSMpMnS6xbU53aY8= -github.com/ethersphere/go-price-oracle-abi v0.1.0/go.mod h1:sI/Qj4/zJ23/b1enzwMMv0/hLTpPNVNacEwCWjo6yBk= -github.com/ethersphere/go-storage-incentives-abi v0.3.0/go.mod h1:SXvJVtM4sEsaSKD0jc1ClpDLw8ErPoROZDme4Wrc/Nc= -github.com/ethersphere/go-sw3-abi v0.4.0/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU= github.com/ethersphere/langos v1.0.0/go.mod h1:dlcN2j4O8sQ+BlCaxeBu43bgr4RQ+inJ+pHwLeZg5Tw= github.com/ethersphere/manifest v0.3.6/go.mod h1:frSxQFT67hQvmTN5CBtgVuqHzGQpg0V0oIIm/B3Am+U= github.com/ethersphere/sw3-bindings/v2 v2.1.0/go.mod h1:ozMVBZZlAirS/FcUpFwzV60v8gC0nVbA/5ZXtCX3xCc= @@ -294,12 +245,9 @@ github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlK github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= -github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= @@ -320,43 +268,41 @@ github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole 
v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= +github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -367,17 +313,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -405,7 +347,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -415,20 +356,14 @@ github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6 github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.4/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -441,16 +376,12 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -461,18 +392,13 @@ github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1 github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= @@ -480,14 +406,12 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -507,7 +431,6 @@ github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/howeyc/fsnotify v0.9.0/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= @@ -544,17 +467,14 @@ github.com/ipfs/go-datastore v0.0.1/go.mod 
h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= @@ -562,13 +482,10 @@ github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9 github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= -github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= -github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= @@ -590,14 +507,12 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod 
h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -605,20 +520,17 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/karalabe/usb v0.0.0-20210518091819-4ea20957c210/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kardianos/service v1.2.0/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -627,10 +539,8 @@ github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.11 h1:i2lw1Pm7Yi/4O6XCSyJWqEHI2MDw2FzUK6o/D21xn2A= github.com/klauspost/cpuid/v2 v2.0.11/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= @@ -663,14 +573,10 @@ github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= -github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= -github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= -github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= @@ -680,14 +586,11 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= -github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod 
h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= @@ -711,17 +614,10 @@ github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= -github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= -github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= @@ -731,11 +627,9 @@ github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+ github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= -github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= -github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= @@ -743,11 +637,8 @@ github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnq github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod 
h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= -github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= @@ -758,7 +649,6 @@ github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHv github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= -github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -767,17 +657,11 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= -github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= @@ -786,7 +670,6 @@ github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2Ez github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod 
h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= @@ -799,15 +682,11 @@ github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= -github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.4/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -815,14 +694,11 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= @@ -830,11 +706,9 @@ github.com/libp2p/go-tcp-transport v0.1.0/go.mod 
h1:oJ8I5VXryj493DEJ7OsBieu8fcg2 github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= -github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= -github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= @@ -843,16 +717,9 @@ github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= -github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.15.2/go.mod h1:qxmO5Y4ZMhdNkunGfxuZnZXnJwYpW9vjQkyrZ7BsgUI= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= -github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -860,15 +727,14 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= 
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.8.0/go.mod h1:Lao6jDqlCfxyLKYFmZXGm2LSHBgVn+P+ROOex6YkT+k= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= -github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -892,7 +758,6 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -911,13 +776,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miguelmota/go-ethereum-hdwallet v0.1.1 h1:zdXGlHao7idpCBjEGTXThVAtMKs+IxAgivZ75xqkWK0= github.com/miguelmota/go-ethereum-hdwallet v0.1.1/go.mod h1:f9m9uXokAHA6WNoYOPjj4AqjJS5pquQRiYYj/XSyPYc= -github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= -github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= -github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -969,13 +829,9 @@ github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.0/go.mod 
h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= -github.com/multiformats/go-multiaddr v0.3.2/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= @@ -1001,33 +857,19 @@ github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0Erjhoj github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod 
h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= @@ -1042,8 +884,6 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1052,23 +892,14 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod 
h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/oschwald/maxminddb-golang v1.3.1/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= @@ -1076,74 +907,54 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/peterh/liner v1.2.1/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pkg/term v0.0.0-20200520122047-c3ffed290a03 h1:pd4YKIqCB0U7O2I4gWHgEUA2mCEOENmco0l/bM957bU= github.com/pkg/term v0.0.0-20200520122047-c3ffed290a03/go.mod h1:Z9+Ul5bCbBKnbCvdOWbLqTHhJiYV414CURZJba6L8qA= +github.com/plexsysio/taskmanager v0.0.0-20211220123746-de5ebdd49ae2 h1:Y3ImPze8NO2iKaPFJ0LkqNFlkC8LjifxxxwEGxbkQ+A= +github.com/plexsysio/taskmanager v0.0.0-20211220123746-de5ebdd49ae2/go.mod h1:mrUMc3N31sq3lEqDyCkbw0dBfOtNZyh+z8kHnNnpd7k= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang 
v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= @@ -1152,7 +963,6 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= @@ -1161,7 +971,6 @@ github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubr github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -1169,7 +978,6 @@ github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfP github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= 
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= @@ -1206,7 +1014,6 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= @@ -1228,7 +1035,6 @@ github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -1242,9 +1048,6 @@ github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70K github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1253,10 +1056,16 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 
h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a h1:kAe4YSu0O0UFn1DowNo2MY5p6xzqtJ/wQ7LZynSvGaY= +github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= +github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc= +github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo= +github.com/swaggo/swag v1.8.7 h1:2K9ivTD3teEO+2fXV6zrZKDqk5IuU2aJtBDo8U7omWU= +github.com/swaggo/swag v1.8.7/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= @@ -1274,14 +1083,12 @@ github.com/tinygrasshopper/bettercsv v0.0.1/go.mod h1:0pXjg6Vm8+zAkvosNH2S0dx8gc github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tjfoc/gmsm v1.3.0/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= @@ -1291,7 +1098,6 @@ github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= @@ -1299,10 +1105,7 @@ github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPU github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= 
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= -github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wealdtech/go-ens/v3 v3.4.3/go.mod h1:FnN14vvOXQmsPbJ7HG8C9/2vexu0DkMnLpjxIMpiNws= -github.com/wealdtech/go-ens/v3 v3.5.1/go.mod h1:bVuYoWYEEeEu7Zy95rIMjPR34QFJarxt8p84ywSo0YM= github.com/wealdtech/go-ens/v3 v3.5.3 h1:lHCUA3j5INsIN1VxDixN/M2ELNrIXO/OWFrsWbpQpwo= github.com/wealdtech/go-ens/v3 v3.5.3/go.mod h1:4qs2EEeTmv538RoB8QjLS9w5N1HSXS253qhLyNEShBs= github.com/wealdtech/go-multicodec v1.2.0/go.mod h1:aedGMaTeYkIqi/KCPre1ho5rTb3hGpu/snBOS3GQLw4= @@ -1327,22 +1130,15 @@ github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6Ut github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xtaci/kcp-go v5.4.20+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE= github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= gitlab.com/nolash/go-mockbytes v0.0.7 h1:9XVFpEfY67kGBVJve3uV19kzqORdlo7V+q09OE6Yo54= gitlab.com/nolash/go-mockbytes v0.0.7/go.mod h1:KKOpNTT39j2Eo+P6uUTOncntfeKY6AFh/2CxuD5MpgE= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= @@ -1350,28 +1146,22 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1389,7 +1179,6 @@ golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1410,7 +1199,6 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220213190939-1e6e3497d506/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1427,9 +1215,6 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -1443,7 +1228,6 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1451,13 +1235,13 @@ golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1470,7 
+1254,6 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1483,22 +1266,14 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1512,20 +1287,13 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1533,7 +1301,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1542,9 +1309,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1558,7 +1323,6 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181031143558-9b800f95dbbc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1577,81 +1341,56 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210108172913-0df2131ae363/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1660,8 +1399,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1705,35 +1445,15 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1747,7 +1467,6 @@ gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZ google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1755,15 +1474,6 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
@@ -1771,7 +1481,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1781,7 +1490,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -1791,49 +1499,23 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200218151345-dad8c97a84f5/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1846,7 +1528,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= @@ -1856,10 +1537,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= @@ -1881,7 +1560,6 @@ gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFab gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1894,8 +1572,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1904,7 +1583,6 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= @@ -1917,14 +1595,11 @@ resenje.org/jsonhttp v0.2.0/go.mod h1:EDyeguyTWj2fU3D3SCE0qNTgthzyEkHYLM1uu0uikH resenje.org/logging v0.1.5/go.mod h1:1IdoCm3+UwYfsplxDGV2pHCkUrLlQzlWwp4r28XfPx4= resenje.org/marshal v0.1.1/go.mod h1:P7Cla6Ju5CFvW4Y8JbRgWX1Hcy4L1w4qcCsyadO7G94= resenje.org/recovery v0.1.1/go.mod h1:3S6aCVKMJEWsSAb61oZTteaiqkIfQPTr1RdiWnRbhME= -resenje.org/singleflight v0.2.0/go.mod h1:plheHgw2rd77IH3J6aN0Lu2JvMvHXoLknDwb6vN0dsE= resenje.org/web v0.4.3/go.mod h1:GZw/Jt7IGIYlytsyGdAV5CytZnaQu7GV2u1LLuViihc= resenje.org/x v0.2.4/go.mod h1:1b2Xpo29FRc3IMvg/u46/IyjySl5IjvtuSjXTA/AOnk= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/pkg/account/account.go b/pkg/account/account.go index b1f3b732..472ab499 100644 --- a/pkg/account/account.go +++ b/pkg/account/account.go @@ -20,12 +20,9 @@ import ( "bytes" "crypto/aes" "crypto/ecdsa" - "crypto/sha256" "encoding/binary" - "errors" "fmt" "strconv" - "strings" "github.com/btcsuite/btcd/btcec" "github.com/ethereum/go-ethereum/accounts" @@ -33,7 +30,6 @@ import ( "github.com/fairdatasociety/fairOS-dfs/pkg/logging" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" hdwallet "github.com/miguelmota/go-ethereum-hdwallet" - "github.com/tyler-smith/go-bip39" ) const ( @@ -44,8 +40,6 @@ const ( seedSize = 64 ) -var errBlankPassword = errors.New("password cannot be blank") - // Account is used for keeping authenticated logged-in user info in the session type Account struct { wallet *Wallet @@ -65,7 +59,7 @@ type Info struct { // it uses a 12 word BIP-0039 wordlist to create a 12 word mnemonic for every user // and spawns key pais whenever necessary. func New(logger logging.Logger) *Account { - wal := newWalletFromMnemonic("") + wal := newWallet(nil) return &Account{ wallet: wal, userAccount: &Info{}, @@ -86,81 +80,40 @@ func CreateRandomKeyPair(now int64) (*ecdsa.PrivateKey, error) { // CreateUserAccount create a new master account for a user. if a valid mnemonic is // provided it is used, otherwise a new mnemonic is generated. The generated mnemonic is // AES encrypted using the password provided. 
-func (a *Account) CreateUserAccount(passPhrase, mnemonic string) (string, string, error) { - wal := newWalletFromMnemonic("") +func (a *Account) CreateUserAccount(mnemonic string) (string, []byte, error) { + wal := newWallet(nil) a.wallet = wal acc, mnemonic, err := wal.LoadMnemonicAndCreateRootAccount(mnemonic) if err != nil { - return "", "", err + return "", nil, err } hdw, err := hdwallet.NewFromMnemonic(mnemonic) if err != nil { // skipcq: TCV-001 - return "", "", err + return "", nil, err } // store publicKey, private key and user a.userAccount.privateKey, err = hdw.PrivateKey(acc) if err != nil { // skipcq: TCV-001 - return "", "", err + return "", nil, err } a.userAccount.publicKey, err = hdw.PublicKey(acc) if err != nil { // skipcq: TCV-001 - return "", "", err + return "", nil, err } addrBytes, err := crypto.NewEthereumAddress(a.userAccount.privateKey.PublicKey) if err != nil { // skipcq: TCV-001 - return "", "", err + return "", nil, err } a.userAccount.address.SetBytes(addrBytes) - // store the mnemonic - encryptedMnemonic, err := a.encryptMnemonic(mnemonic, passPhrase) + seed, err := hdwallet.NewSeedFromMnemonic(mnemonic) if err != nil { // skipcq: TCV-001 - return "", "", err - } - a.wallet.encryptedmnemonic = encryptedMnemonic - - return mnemonic, encryptedMnemonic, nil -} - -// LoadUserAccount loads the user account given the encrypted mnemonic and -// password. -func (a *Account) LoadUserAccount(passPhrase, encryptedMnemonic string) error { - password := passPhrase - if password == "" { - return errBlankPassword - } - - a.wallet.encryptedmnemonic = encryptedMnemonic - plainMnemonic, err := a.wallet.decryptMnemonic(password) - if err != nil { - return fmt.Errorf("invalid password") + return "", nil, err } - acc, err := a.wallet.CreateAccount(rootPath, plainMnemonic) - if err != nil { // skipcq: TCV-001 - return err - } - - hdw, err := hdwallet.NewFromMnemonic(plainMnemonic) - if err != nil { // skipcq: TCV-001 - return err - } - a.userAccount.privateKey, err = hdw.PrivateKey(acc) - if err != nil { // skipcq: TCV-001 - return err - } - a.userAccount.publicKey, err = hdw.PublicKey(acc) - if err != nil { // skipcq: TCV-001 - return err - } - addrBytes, err := crypto.NewEthereumAddress(a.userAccount.privateKey.PublicKey) - if err != nil { // skipcq: TCV-001 - return err - } - a.userAccount.address.SetBytes(addrBytes) - return nil + return mnemonic, seed, nil } // LoadUserAccountFromSeed loads the user account given the bip39 seed @@ -189,43 +142,9 @@ func (a *Account) LoadUserAccountFromSeed(seed []byte) error { return nil } -// Authorise is used to check if the given password is valid for a user account. -// this is done by decrypting the mnemonic using the supplied password and checking -// the validity of the mnemonic to see if it confirms to bip-0039 list of words. 
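
Reviewer note on the change above (not part of the patch): `CreateUserAccount` no longer takes a passphrase and now returns the raw BIP-39 seed in place of an AES-encrypted mnemonic, and `LoadUserAccountFromSeed` becomes the way to restore an account. A minimal caller sketch, using only names that appear in this diff; the surrounding `main` scaffolding and error handling are illustrative assumptions:

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/fairdatasociety/fairOS-dfs/pkg/account"
	"github.com/fairdatasociety/fairOS-dfs/pkg/logging"
	hdwallet "github.com/miguelmota/go-ethereum-hdwallet"
)

func main() {
	logger := logging.New(io.Discard, 0)

	// New signature: CreateUserAccount(mnemonic) -> (mnemonic, seed, error).
	// Passing "" generates a fresh 12-word mnemonic.
	acc := account.New(logger)
	mnemonic, seed, err := acc.CreateUserAccount("")
	if err != nil {
		log.Fatal(err)
	}

	// The seed, not an encrypted mnemonic, is now what a caller keeps and
	// later feeds back in to restore the account.
	acc2 := account.New(logger)
	if err := acc2.LoadUserAccountFromSeed(seed); err != nil {
		log.Fatal(err)
	}

	// CreateUserAccount derives the returned seed from the mnemonic with
	// this same call, so re-deriving it gives the same bytes.
	seed2, err := hdwallet.NewSeedFromMnemonic(mnemonic)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(seed, seed2)) // true
}
```
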
-func (a *Account) Authorise(password string) bool { - if password == "" { - a.logger.Errorf(errBlankPassword.Error()) - return false - } - - /* - TODO this is just a temporary fix, in future when mnemonic logic will be removed, - we have to remove Authorise logic or come up with something to check password validity - */ - if a.wallet.seed != nil { - return true - } - plainMnemonic, err := a.wallet.decryptMnemonic(password) - if err != nil { - return false - } - // check the validity of the mnemonic - if plainMnemonic == "" { // skipcq: TCV-001 - return false - } - words := strings.Split(plainMnemonic, " ") - if len(words) != 12 { // skipcq: TCV-001 - return false - } - if !bip39.IsMnemonicValid(plainMnemonic) { // skipcq: TCV-001 - return false - } - return true -} - // CreatePodAccount is used to create a new key pair from the master mnemonic. this key pair is // used as the base key pair for a newly created pod. -func (a *Account) CreatePodAccount(accountId int, passPhrase string, createPod bool) (*Info, error) { +func (a *Account) CreatePodAccount(accountId int, createPod bool) (*Info, error) { if acc, ok := a.podAccounts[accountId]; ok { // skipcq: TCV-001 return acc, nil } @@ -245,25 +164,6 @@ func (a *Account) CreatePodAccount(accountId int, passPhrase string, createPod b if err != nil { // skipcq: TCV-001 return nil, err } - } else { - password := passPhrase - if password == "" { // skipcq: TCV-001 - return nil, errBlankPassword - } - - plainMnemonic, err := a.wallet.decryptMnemonic(password) - if err != nil { // skipcq: TCV-001 - return nil, fmt.Errorf("invalid password") - } - - acc, err = a.wallet.CreateAccount(path, plainMnemonic) - if err != nil { // skipcq: TCV-001 - return nil, err - } - hdw, err = hdwallet.NewFromMnemonic(plainMnemonic) - if err != nil { // skipcq: TCV-001 - return nil, err - } } accountInfo.privateKey, err = hdw.PrivateKey(acc) @@ -356,8 +256,7 @@ func (*Info) PadSeed(seed []byte, passphrase string) ([]byte, error) { chunkData := make([]byte, 0, utils.MaxChunkLength) chunkData = append(chunkData, seed...) chunkData = append(chunkData, randomBytes...) 
- aesKey := sha256.Sum256([]byte(passphrase)) - encryptedBytes, err := encryptBytes(aesKey[:], chunkData) + encryptedBytes, err := utils.EncryptBytes([]byte(passphrase), chunkData) if err != nil { // skipcq: TCV-001 return nil, fmt.Errorf("mnemonic padding failed: %w", err) } @@ -366,28 +265,10 @@ func (*Info) PadSeed(seed []byte, passphrase string) ([]byte, error) { // RemovePadFromSeed removes the padding of random elements from the given data and returns the seed func (*Info) RemovePadFromSeed(paddedSeed []byte, passphrase string) ([]byte, error) { - aesKey := sha256.Sum256([]byte(passphrase)) - decryptedBytes, err := decryptBytes(aesKey[:], paddedSeed) + decryptedBytes, err := utils.DecryptBytes([]byte(passphrase), paddedSeed) if err != nil { // skipcq: TCV-001 return nil, fmt.Errorf("seed decryption failed: %w", err) } return decryptedBytes[:seedSize], nil } - -func (*Account) encryptMnemonic(mnemonic, passPhrase string) (string, error) { - // get the password and hash it to 256 bits - password := passPhrase - if password == "" { // skipcq: TCV-001 - return "", errBlankPassword - } - aesKey := sha256.Sum256([]byte(password)) - - // encrypt the mnemonic - encryptedMessage, err := encrypt(aesKey[:], mnemonic) - if err != nil { // skipcq: TCV-001 - return "", fmt.Errorf("create user account: %w", err) - } - - return encryptedMessage, nil -} diff --git a/pkg/account/account_test.go b/pkg/account/account_test.go index b2fb701b..0387559e 100644 --- a/pkg/account/account_test.go +++ b/pkg/account/account_test.go @@ -19,9 +19,7 @@ package account import ( "bytes" "io" - "io/ioutil" "os" - "strings" "testing" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" @@ -30,39 +28,28 @@ import ( ) func TestAccount_CreateRootAccount(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } - password := "letmein" logger := logging.New(io.Discard, 0) acc := New(logger) - _, _, err = acc.CreateUserAccount(password, "invalid mnemonic that we are passing to check create account error message") + _, _, err = acc.CreateUserAccount("invalid mnemonic that we are passing to check create account error message") if err == nil { t.Fatal("invalid mnemonic passed") } - _, _, err = acc.CreateUserAccount(password, "") + _, _, err = acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - if acc.wallet == nil || acc.wallet.encryptedmnemonic == "" { + if acc.wallet == nil || acc.wallet.seed == nil { t.Fatal("wallet creation error") } - plainMnemonic, err := acc.wallet.decryptMnemonic(password) - if err != nil { - t.Fatal(err) - } - - words := strings.Split(plainMnemonic, " ") - if len(words) != 12 { - t.Fatal("mnemonic is not 12 words") - } - if acc.userAccount.GetPrivateKey() == nil || acc.userAccount.GetPublicKey() == nil || len(acc.userAccount.address[:]) != utils.AddressLength { t.Fatalf("keys not intialised") } @@ -74,67 +61,17 @@ func TestAccount_CreateRootAccount(t *testing.T) { } func TestAuthorise(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } - - password := "letmein" logger := logging.New(io.Discard, 0) acc := New(logger) - _, _, err = acc.CreateUserAccount(password, "") + _, _, err = acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - authorised := acc.Authorise("") - if authorised { - t.Fatal("authorised with blank password") - } - authorised = acc.Authorise("wrong password") - if authorised { - t.Fatal("authorised with wrong password") - 
} - authorised = acc.Authorise(password) - if !authorised { - t.Fatal("authorisation failed") - } - - err = os.RemoveAll(tempDir) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadAndStoreMnemonic(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") - if err != nil { - t.Fatal(err) - } - password := "letmein" - logger := logging.New(io.Discard, 0) - acc := New(logger) - _, em, err := acc.CreateUserAccount(password, "") - if err != nil { - t.Fatal(err) - } - - expectedMnemonic, err := acc.wallet.decryptMnemonic(password) - if err != nil { - t.Fatal(err) - } - - acc.wallet.encryptedmnemonic = em - - gotMnemonic, err := acc.wallet.decryptMnemonic(password) - if err != nil { - t.Fatal(err) - } - - if gotMnemonic != expectedMnemonic { - t.Fatalf("mnemonics does not match. expected %s and got %s", expectedMnemonic, gotMnemonic) - } - err = os.RemoveAll(tempDir) if err != nil { t.Fatal(err) @@ -155,64 +92,21 @@ func TestCreateRandomKeyPair(t *testing.T) { } } -func TestLoadUserAccount(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") - if err != nil { - t.Fatal(err) - } - password := "letmein" - logger := logging.New(io.Discard, 0) - acc := New(logger) - _, em, err := acc.CreateUserAccount(password, "") - if err != nil { - t.Fatal(err) - } - - acc.wallet.encryptedmnemonic = em - - acc2 := New(logger) - err = acc2.LoadUserAccount("", em) - if err == nil { - t.Fatal("blank password") - } - err = acc2.LoadUserAccount("asdasd", em) - if err == nil { - t.Fatal("wrong password password") - } - - err = acc2.LoadUserAccount(password, em) - if err != nil { - t.Fatal(err) - } - if acc.userAccount.address != acc2.userAccount.address { - t.Fatal("address do not match") - } - err = os.RemoveAll(tempDir) - if err != nil { - t.Fatal(err) - } -} - func TestLoadUserAccountFromSeed(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } - password := "letmein" logger := logging.New(io.Discard, 0) acc := New(logger) - m, em, err := acc.CreateUserAccount(password, "") + _, seed, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - acc.wallet.encryptedmnemonic = em + acc.wallet.seed = seed acc2 := New(logger) - seed, err := hdwallet.NewSeedFromMnemonic(m) - if err != nil { - t.Fatal(err) - } err = acc2.LoadUserAccountFromSeed([]byte{}) if err == nil { t.Fatal("nil seed provided") @@ -232,7 +126,7 @@ func TestLoadUserAccountFromSeed(t *testing.T) { } func TestPadUnpadSeed(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } @@ -240,16 +134,12 @@ func TestPadUnpadSeed(t *testing.T) { password := "letmein" logger := logging.New(io.Discard, 0) acc := New(logger) - m, em, err := acc.CreateUserAccount(password, "") + _, seed, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - acc.wallet.encryptedmnemonic = em - seed, err := hdwallet.NewSeedFromMnemonic(m) - if err != nil { - t.Fatal(err) - } + acc.wallet.seed = seed r, err := acc.userAccount.PadSeed(seed, password) if err != nil { t.Fatal(err) @@ -270,24 +160,23 @@ func TestPadUnpadSeed(t *testing.T) { } func TestCreatePodAccount(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } - password := "letmein" logger := logging.New(io.Discard, 0) acc := New(logger) - _, em, err := acc.CreateUserAccount(password, "") + _, seed, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - 
acc.wallet.encryptedmnemonic = em - pod1AccountInfo, err := acc.CreatePodAccount(1, password, false) + acc.wallet.seed = seed + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } - pod2AccountInfo, err := acc.CreatePodAccount(2, password, false) + pod2AccountInfo, err := acc.CreatePodAccount(2, false) if err != nil { t.Fatal(err) } @@ -303,33 +192,29 @@ func TestCreatePodAccount(t *testing.T) { } func TestCreatePodAccountWithSeed(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } - password := "letmein" logger := logging.New(io.Discard, 0) acc := New(logger) - m, em, err := acc.CreateUserAccount(password, "") + _, seed, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - acc.wallet.encryptedmnemonic = em - seed, err := hdwallet.NewSeedFromMnemonic(m) - if err != nil { - t.Fatal(err) - } + acc.wallet.seed = seed + acc2 := New(logger) err = acc2.LoadUserAccountFromSeed(seed) if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc2.CreatePodAccount(1, password, false) + pod1AccountInfo, err := acc2.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } - pod2AccountInfo, err := acc2.CreatePodAccount(2, password, false) + pod2AccountInfo, err := acc2.CreatePodAccount(2, false) if err != nil { t.Fatal(err) } @@ -345,33 +230,32 @@ func TestCreatePodAccountWithSeed(t *testing.T) { } func TestGetAddress(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } - password := "letmein" logger := logging.New(io.Discard, 0) acc := New(logger) - m, em, err := acc.CreateUserAccount(password, "") + m, seed, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - acc.wallet.encryptedmnemonic = em - seed, err := hdwallet.NewSeedFromMnemonic(m) + acc.wallet.seed = seed + seed2, err := hdwallet.NewSeedFromMnemonic(m) if err != nil { t.Fatal(err) } acc2 := New(logger) - err = acc2.LoadUserAccountFromSeed(seed) + err = acc2.LoadUserAccountFromSeed(seed2) if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc2.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc2.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } - pod2AccountInfo, err := acc2.CreatePodAccount(2, "password", false) + pod2AccountInfo, err := acc2.CreatePodAccount(2, false) if err != nil { t.Fatal(err) } @@ -393,31 +277,3 @@ func TestGetAddress(t *testing.T) { t.Fatal(err) } } - -func TestLoadSeedFromMnemonic(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - password := "letmein" - logger := logging.New(io.Discard, 0) - acc := New(logger) - m, em, err := acc.CreateUserAccount(password, "") - if err != nil { - t.Fatal(err) - } - - acc.wallet.encryptedmnemonic = em - seed, err := hdwallet.NewSeedFromMnemonic(m) - if err != nil { - t.Fatal(err) - } - seed2, err := acc.wallet.LoadSeedFromMnemonic(password) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(seed, seed2) { - t.Fatal("seeds do not match") - } -} diff --git a/pkg/account/encrypt.go b/pkg/account/encrypt.go deleted file mode 100644 index 4094768d..00000000 --- a/pkg/account/encrypt.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright © 2020 FairOS Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package account - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/base64" - "errors" - "io" -) - -func encrypt(key []byte, message string) (encmess string, err error) { - plainText := []byte(message) - - block, err := aes.NewCipher(key) - if err != nil { // skipcq: TCV-001 - return - } - - //IV needs to be unique, but doesn't have to be secure. - //It's common to put it at the beginning of the ciphertext. - cipherText := make([]byte, aes.BlockSize+len(plainText)) - iv := cipherText[:aes.BlockSize] - if _, err = io.ReadFull(rand.Reader, iv); err != nil { // skipcq: TCV-001 - return - } - - stream := cipher.NewCFBEncrypter(block, iv) - stream.XORKeyStream(cipherText[aes.BlockSize:], plainText) - - //returns to base64 encoded string - encmess = base64.URLEncoding.EncodeToString(cipherText) - return -} - -func decrypt(key []byte, securemess string) (decodedmess string, err error) { - cipherText, err := base64.URLEncoding.DecodeString(securemess) - if err != nil { // skipcq: TCV-001 - return - } - - block, err := aes.NewCipher(key) - if err != nil { // skipcq: TCV-001 - return - } - - if len(cipherText) < aes.BlockSize { // skipcq: TCV-001 - err = errors.New("ciphertext block size is too short") - return - } - - //IV needs to be unique, but doesn't have to be secure. - //It's common to put it at the beginning of the ciphertext. - iv := cipherText[:aes.BlockSize] - cipherText = cipherText[aes.BlockSize:] - - stream := cipher.NewCFBDecrypter(block, iv) - // XORKeyStream can work in-place if the two arguments are the same. - stream.XORKeyStream(cipherText, cipherText) - - decodedmess = string(cipherText) - return -} - -func encryptBytes(key, message []byte) ([]byte, error) { - block, err := aes.NewCipher(key) - if err != nil { // skipcq: TCV-001 - return nil, err - } - - //IV needs to be unique, but doesn't have to be secure. - //It's common to put it at the beginning of the ciphertext. - cipherText := make([]byte, aes.BlockSize+len(message)) - iv := cipherText[:aes.BlockSize] - if _, err = io.ReadFull(rand.Reader, iv); err != nil { // skipcq: TCV-001 - return nil, err - } - stream := cipher.NewCFBEncrypter(block, iv) - stream.XORKeyStream(cipherText[aes.BlockSize:], message) - - return cipherText, nil -} - -func decryptBytes(key, cipherText []byte) ([]byte, error) { - block, err := aes.NewCipher(key) - if err != nil { // skipcq: TCV-001 - return nil, err - } - - if len(cipherText) < aes.BlockSize { // skipcq: TCV-001 - err = errors.New("ciphertext block size is too short") - return nil, err - } - - //IV needs to be unique, but doesn't have to be secure. - //It's common to put it at the beginning of the ciphertext. - iv := cipherText[:aes.BlockSize] - cipherText = cipherText[aes.BlockSize:] - - stream := cipher.NewCFBDecrypter(block, iv) - // XORKeyStream can work in-place if the two arguments are the same. - stream.XORKeyStream(cipherText, cipherText) - - return cipherText, nil -} diff --git a/pkg/account/wallet.go b/pkg/account/wallet.go index 58c14acf..1bace280 100644 --- a/pkg/account/wallet.go +++ b/pkg/account/wallet.go @@ -17,7 +17,6 @@ limitations under the License. 
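
Reviewer note on the deletion above (not part of the patch): the package-local AES-CFB helpers (SHA-256 of the passphrase as key, IV prepended to the ciphertext) are removed, and `PadSeed`/`RemovePadFromSeed` now delegate to `utils.EncryptBytes` and `utils.DecryptBytes`. A round-trip sketch below; the signatures are inferred purely from their call sites in this diff, the helpers' internals are not shown here, and the assumption that they are strict inverses is what `RemovePadFromSeed` relies on:

```
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/fairdatasociety/fairOS-dfs/pkg/utils"
)

func main() {
	passphrase := "letmein"                   // password used by the tests in this PR
	payload := []byte("seed plus random pad") // stand-in for the padded seed chunk

	// utils.EncryptBytes takes the raw passphrase bytes and the plaintext,
	// replacing the removed sha256.Sum256 + encryptBytes pair in PadSeed.
	encrypted, err := utils.EncryptBytes([]byte(passphrase), payload)
	if err != nil {
		log.Fatal(err)
	}

	// utils.DecryptBytes reverses it with the same passphrase;
	// RemovePadFromSeed then slices the first seedSize bytes from the result.
	decrypted, err := utils.DecryptBytes([]byte(passphrase), encrypted)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(payload, decrypted)) // expected: true if the helpers are inverses
}
```
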
package account import ( - "crypto/sha256" "fmt" "strings" @@ -33,13 +32,13 @@ const ( // Wallet is used to create root and pod accounts of user type Wallet struct { - encryptedmnemonic string - seed []byte + seed []byte } -func newWalletFromMnemonic(mnemonic string) *Wallet { - wallet := &Wallet{ - encryptedmnemonic: mnemonic, +func newWallet(seed []byte) *Wallet { + wallet := &Wallet{} + if seed != nil { + wallet.seed = seed } return wallet } @@ -74,8 +73,12 @@ func (w *Wallet) LoadMnemonicAndCreateRootAccount(mnemonic string) (accounts.Acc if err != nil { // skipcq: TCV-001 return accounts.Account{}, "", err } + seed, err := hdwallet.NewSeedFromMnemonic(mnemonic) + if err != nil { // skipcq: TCV-001 + return accounts.Account{}, "", err + } + w.seed = seed return acc, mnemonic, nil - } // CreateAccount is used to create a new hd wallet using the given mnemonic and the walletPath. @@ -119,36 +122,3 @@ func (*Wallet) IsValidMnemonic(mnemonic string) error { } return nil } - -// LoadSeedFromMnemonic loads seed of the Wallet from pre-loaded mnemonic -func (w *Wallet) LoadSeedFromMnemonic(password string) ([]byte, error) { - mnemonic, err := w.decryptMnemonic(password) - if err != nil { - return nil, err - } - seed, err := hdwallet.NewSeedFromMnemonic(mnemonic) - if err != nil { // skipcq: TCV-001 - return nil, err - } - w.seed = seed - return w.seed, nil -} - -func (w *Wallet) decryptMnemonic(password string) (string, error) { - if w.encryptedmnemonic == "" { - return "", fmt.Errorf("invalid encrypted mnemonic") - } - aesKey := sha256.Sum256([]byte(password)) - - //decrypt the message - mnemonic, err := decrypt(aesKey[:], w.encryptedmnemonic) - if err != nil { // skipcq: TCV-001 - return "", err - } - - err = w.IsValidMnemonic(mnemonic) - if err != nil { - return "", fmt.Errorf("invalid password") - } - return mnemonic, nil -} diff --git a/pkg/account/wallet_test.go b/pkg/account/wallet_test.go index ef7e3aa1..a1e710bf 100644 --- a/pkg/account/wallet_test.go +++ b/pkg/account/wallet_test.go @@ -1,25 +1,21 @@ package account import ( - "io" - "io/ioutil" "os" "testing" - "github.com/fairdatasociety/fairOS-dfs/pkg/logging" + hdwallet "github.com/miguelmota/go-ethereum-hdwallet" + "github.com/tyler-smith/go-bip39" ) func TestWallet(t *testing.T) { - tempDir, err := ioutil.TempDir("", "pod") + tempDir, err := os.MkdirTemp("", "pod") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempDir) - password := "letmein" - logger := logging.New(io.Discard, 0) - acc := New(logger) entropy, err := bip39.NewEntropy(128) if err != nil { t.Fatal(err) @@ -28,12 +24,11 @@ func TestWallet(t *testing.T) { if err != nil { t.Fatal(err) } - enMnemonic, err := acc.encryptMnemonic(mnemonic, password) + seed, err := hdwallet.NewSeedFromMnemonic(mnemonic) if err != nil { t.Fatal(err) } - - wallet := newWalletFromMnemonic(enMnemonic) + wallet := newWallet(seed) _, _, err = wallet.LoadMnemonicAndCreateRootAccount("invalid mnemonic that we are passing to check create account error message") if err == nil { t.Fatal("invalid mnemonic") @@ -42,15 +37,4 @@ func TestWallet(t *testing.T) { if err == nil { t.Fatal("invalid mnemonic") } - - _, err = wallet.LoadSeedFromMnemonic("wrongpassword") - if err == nil { - t.Fatal("wrong password") - } - - w := &Wallet{} - _, err = w.LoadSeedFromMnemonic("pass") - if err == nil { - t.Fatal("wrong password") - } } diff --git a/pkg/api/dir_ls.go b/pkg/api/dir_ls.go index 587eecd8..b5e343ba 100644 --- a/pkg/api/dir_ls.go +++ b/pkg/api/dir_ls.go @@ -34,22 +34,32 @@ type ListFileResponse 
struct { Files []file.Entry `json:"files,omitempty"` } -// DirectoryLsHandler is the api handler for listing the contents of a directory. -// it takes only one argument -// - dir_path: the path of the directory to list it contents +// DirectoryLsHandler godoc +// +// @Summary List directory +// @Description DirectoryLsHandler is the api handler for listing the contents of a directory. +// @Tags dir +// @Produce json +// @Param podName query string true "pod name" +// @Param dirPath query string true "dir path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} ListFileResponse +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/dir/ls [get] func (h *Handler) DirectoryLsHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("ls: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "ls: \"pod_name\" argument missing"}) + h.logger.Errorf("ls: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "ls: \"podName\" argument missing"}) return } podName := keys[0] - keys, ok = r.URL.Query()["dir_path"] + keys, ok = r.URL.Query()["dirPath"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("ls: \"dir_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "ls: \"dir_path\" argument missing"}) + h.logger.Errorf("ls: \"dirPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "ls: \"dirPath\" argument missing"}) return } directory := keys[0] diff --git a/pkg/api/dir_mkdir.go b/pkg/api/dir_mkdir.go index 19af7ad0..aad8b71e 100644 --- a/pkg/api/dir_mkdir.go +++ b/pkg/api/dir_mkdir.go @@ -20,8 +20,6 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "resenje.org/jsonhttp" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" @@ -29,9 +27,24 @@ import ( p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" ) -// DirectoryMkdirHandler is the api handler to create a new directory. -// it takes one argument -// - dir-path: the new directory to create along with its absolute path +type DirRequest struct { + PodName string `json:"podName,omitempty"` + DirectoryPath string `json:"dirPath,omitempty"` +} + +// DirectoryMkdirHandler godoc +// +// @Summary Create directory +// @Description DirectoryMkdirHandler is the api handler to create a new directory. 
+// @Tags dir +// @Accept json +// @Produce json +// @Param dir_request body DirRequest true "pod name and dir path" +// @Param Cookie header string true "cookie parameter" +// @Success 201 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/dir/mkdir [post] func (h *Handler) DirectoryMkdirHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -41,7 +54,7 @@ func (h *Handler) DirectoryMkdirHandler(w http.ResponseWriter, r *http.Request) } decoder := json.NewDecoder(r.Body) - var fsReq common.FileSystemRequest + var fsReq DirRequest err := decoder.Decode(&fsReq) if err != nil { h.logger.Errorf("mkdir: could not decode arguments") @@ -51,15 +64,15 @@ func (h *Handler) DirectoryMkdirHandler(w http.ResponseWriter, r *http.Request) podName := fsReq.PodName if podName == "" { - h.logger.Errorf("mkdir: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "mkdir: \"pod_name\" argument missing"}) + h.logger.Errorf("mkdir: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "mkdir: \"podName\" argument missing"}) return } dirToCreateWithPath := fsReq.DirectoryPath if dirToCreateWithPath == "" { - h.logger.Errorf("mkdir: \"dir_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "mkdir: \"dir_path\" argument missing"}) + h.logger.Errorf("mkdir: \"dirPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "mkdir: \"dirPath\" argument missing"}) return } diff --git a/pkg/api/dir_present.go b/pkg/api/dir_present.go index 7a21de35..b0a8abf1 100644 --- a/pkg/api/dir_present.go +++ b/pkg/api/dir_present.go @@ -29,22 +29,32 @@ type DirPresentResponse struct { Error string `json:"error,omitempty"` } -// DirectoryPresentHandler is the api handler which says if a a directory is present or not -// it takes only one argument -// - dir-path: the directory to check along with its absolute path +// DirectoryPresentHandler godoc +// +// @Summary Is directory present +// @Description DirectoryPresentHandler is the api handler which says if a directory is present or not +// @Tags dir +// @Produce json +// @Param podName query string true "pod name" +// @Param dirPath query string true "dir path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} DirPresentResponse +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/dir/present [get] func (h *Handler) DirectoryPresentHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("dir present: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "dir present: \"pod_name\" argument missing"}) + h.logger.Errorf("dir present: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "dir present: \"podName\" argument missing"}) return } podName := keys[0] - keys, ok = r.URL.Query()["dir_path"] + keys, ok = r.URL.Query()["dirPath"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("dir present: \"dir_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "dir present: \"dir_path\" argument missing"}) + h.logger.Errorf("dir present: \"dirPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "dir present: \"dirPath\" argument missing"}) return } dirToCheck := keys[0] diff --git a/pkg/api/dir_rename.go b/pkg/api/dir_rename.go new 
file mode 100644 index 00000000..7f100065 --- /dev/null +++ b/pkg/api/dir_rename.go @@ -0,0 +1,112 @@ +/* +Copyright © 2020 FairOS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "encoding/json" + "net/http" + + "github.com/fairdatasociety/fairOS-dfs/cmd/common" + + "resenje.org/jsonhttp" + + "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" + "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" + p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" +) + +// DirectoryRenameHandler godoc +// +// @Summary Rename directory +// @Description DirectoryRenameHandler is the api handler to rename a directory. +// @Tags dir +// @Accept json +// @Produce json +// @Param dir_request body common.RenameRequest true "old name and new path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/dir/rename [post] +func (h *Handler) DirectoryRenameHandler(w http.ResponseWriter, r *http.Request) { + contentType := r.Header.Get("Content-Type") + if contentType != jsonContentType { + h.logger.Errorf("rename-dir: invalid request body type") + jsonhttp.BadRequest(w, &response{Message: "rename-dir: invalid request body type"}) + return + } + + decoder := json.NewDecoder(r.Body) + var renameReq common.RenameRequest + err := decoder.Decode(&renameReq) + if err != nil { + h.logger.Errorf("rename-dir: could not decode arguments") + jsonhttp.BadRequest(w, &response{Message: "rename-dir: could not decode arguments"}) + return + } + + podName := renameReq.PodName + if podName == "" { + h.logger.Errorf("rename-dir: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "rename-dir: \"podName\" argument missing"}) + return + } + + oldPath := renameReq.OldPath + if oldPath == "" { + h.logger.Errorf("rename-dir: \"oldPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "rename-dir: \"oldPath\" argument missing"}) + return + } + + newPath := renameReq.NewPath + if newPath == "" { + h.logger.Errorf("rename-dir: \"newPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "rename-dir: \"newPath\" argument missing"}) + return + } + + // get values from cookie + sessionId, err := cookie.GetSessionIdFromCookie(r) + if err != nil { + h.logger.Errorf("rename-dir: invalid cookie: %v", err) + jsonhttp.BadRequest(w, &response{Message: ErrInvalidCookie.Error()}) + return + } + if sessionId == "" { + h.logger.Errorf("rename-dir: \"cookie-id\" parameter missing in cookie") + jsonhttp.BadRequest(w, &response{Message: "rename-dir: \"cookie-id\" parameter missing in cookie"}) + return + } + + // make directory + err = h.dfsAPI.RenameDir(podName, oldPath, newPath, sessionId) + if err != nil { + if err == dfs.ErrPodNotOpen || err == dfs.ErrUserNotLoggedIn || + err == p.ErrInvalidDirectory || + err == p.ErrTooLongDirectoryName || + err == p.ErrPodNotOpened { + h.logger.Errorf("rename-dir: %v", err) + jsonhttp.BadRequest(w, &response{Message: "rename-dir: " + 
err.Error()}) + return + } + h.logger.Errorf("rename-dir: %v", err) + jsonhttp.InternalServerError(w, &response{Message: "rename-dir: " + err.Error()}) + return + } + jsonhttp.OK(w, &response{Message: "directory renamed successfully"}) +} diff --git a/pkg/api/dir_rmdir.go b/pkg/api/dir_rmdir.go index 124b49db..4eec0fb9 100644 --- a/pkg/api/dir_rmdir.go +++ b/pkg/api/dir_rmdir.go @@ -20,8 +20,6 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "resenje.org/jsonhttp" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" @@ -29,9 +27,19 @@ import ( p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" ) -// DirectoryRmdirHandler is the api handler to remove a directory -// it takes one argument -// - dir-path: the directory to remove along with its absolute path +// DirectoryRmdirHandler godoc +// +// @Summary Remove directory +// @Description DirectoryRmdirHandler is the api handler to remove a directory. +// @Tags dir +// @Accept json +// @Produce json +// @Param dir_request body DirRequest true "pod name and dir path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/dir/rmdir [delete] func (h *Handler) DirectoryRmdirHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -41,7 +49,7 @@ func (h *Handler) DirectoryRmdirHandler(w http.ResponseWriter, r *http.Request) } decoder := json.NewDecoder(r.Body) - var fsReq common.FileSystemRequest + var fsReq DirRequest err := decoder.Decode(&fsReq) if err != nil { h.logger.Errorf("rmdir: could not decode arguments") @@ -51,15 +59,15 @@ func (h *Handler) DirectoryRmdirHandler(w http.ResponseWriter, r *http.Request) podName := fsReq.PodName if podName == "" { - h.logger.Errorf("rmdir: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "rmdir: \"pod_name\" argument missing"}) + h.logger.Errorf("rmdir: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "rmdir: \"podName\" argument missing"}) return } dir := fsReq.DirectoryPath if dir == "" { - h.logger.Errorf("rmdir: \"dir_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "rmdir: \"dir_path\" argument missing"}) + h.logger.Errorf("rmdir: \"dirPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "rmdir: \"dirPath\" argument missing"}) return } diff --git a/pkg/api/dir_stat.go b/pkg/api/dir_stat.go index 09268492..2a0c7c81 100644 --- a/pkg/api/dir_stat.go +++ b/pkg/api/dir_stat.go @@ -26,22 +26,32 @@ import ( p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" ) -// DirectoryStatHandler is the api handler which gives the information about a directory -// it takes one argument -// dir_path: the directory to give info about along with its absolute path +// DirectoryStatHandler godoc +// +// @Summary Directory stat +// @Description DirectoryStatHandler is the api handler which gives the information about a directory +// @Tags dir +// @Produce json +// @Param podName query string true "pod name" +// @Param dirPath query string true "dir path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} dir.Stats +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/dir/stat [get] func (h *Handler) DirectoryStatHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] 
if !ok || len(keys[0]) < 1 { - h.logger.Errorf("dir: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "dir: \"pod_name\" argument missing"}) + h.logger.Errorf("dir: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "dir: \"podName\" argument missing"}) return } podName := keys[0] - keys, ok = r.URL.Query()["dir_path"] + keys, ok = r.URL.Query()["dirPath"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("dir present: \"dir_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "dir present: \"dir_path\" argument missing"}) + h.logger.Errorf("dir present: \"dirPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "dir present: \"dirPath\" argument missing"}) return } dir := keys[0] diff --git a/pkg/api/doc_count.go b/pkg/api/doc_count.go index 832d32cd..3c424360 100644 --- a/pkg/api/doc_count.go +++ b/pkg/api/doc_count.go @@ -20,17 +20,31 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) -// DocCountHandler is the api handler to count the number of documents in -// a given document database -// it takes two arguments -// - table_name: the name of the table to count the rows -// - expr: the expression for selecting certain rows +type DocCountRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + SimpleIndex string `json:"si,omitempty"` + Mutable bool `json:"mutable,omitempty"` + Expression string `json:"expr,omitempty"` +} + +// DocCountHandler godoc +// +// @Summary Count number of document in a table +// @Description DocCountHandler is the api handler to count the number of documents in a given document database +// @Tags doc +// @Accept json +// @Produce json +// @Param doc_request body DocCountRequest true "doc table info" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} collection.TableKeyCount +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/count [post] func (h *Handler) DocCountHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -40,7 +54,7 @@ func (h *Handler) DocCountHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var docReq common.DocRequest + var docReq DocCountRequest err := decoder.Decode(&docReq) if err != nil { h.logger.Errorf("doc count: could not decode arguments") @@ -50,15 +64,15 @@ func (h *Handler) DocCountHandler(w http.ResponseWriter, r *http.Request) { podName := docReq.PodName if podName == "" { - h.logger.Errorf("doc count: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc count: \"pod_name\" argument missing"}) + h.logger.Errorf("doc count: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc count: \"podName\" argument missing"}) return } name := docReq.TableName if name == "" { - h.logger.Errorf("doc count: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc count: \"table_name\" argument missing"}) + h.logger.Errorf("doc count: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc count: \"tableName\" argument missing"}) return } diff --git a/pkg/api/doc_delete.go b/pkg/api/doc_delete.go index 83c8cff0..fccafbc0 100644 --- a/pkg/api/doc_delete.go +++ b/pkg/api/doc_delete.go @@ -20,15 +20,23 @@ import ( 
"encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) -// DocDeleteHandler is the api handler to delete the given document database -// it takes only one argument -// table_name: the document database to delete +// DocDeleteHandler godoc +// +// @Summary Delete a doc table +// @Description DocDeleteHandler is the api handler to delete the given document database +// @Tags doc +// @Accept json +// @Produce json +// @Param doc_request body SimpleDocRequest true "doc table info" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/delete [delete] func (h *Handler) DocDeleteHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -38,7 +46,7 @@ func (h *Handler) DocDeleteHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var docReq common.DocRequest + var docReq SimpleDocRequest err := decoder.Decode(&docReq) if err != nil { h.logger.Errorf("doc delete: could not decode arguments") @@ -48,15 +56,15 @@ func (h *Handler) DocDeleteHandler(w http.ResponseWriter, r *http.Request) { name := docReq.TableName if name == "" { - h.logger.Errorf("doc delete: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc delete: \"table_name\" argument missing"}) + h.logger.Errorf("doc delete: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc delete: \"tableName\" argument missing"}) return } podName := docReq.PodName if podName == "" { - h.logger.Errorf("doc delete: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc delete: \"pod_name\" argument missing"}) + h.logger.Errorf("doc delete: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc delete: \"podName\" argument missing"}) return } diff --git a/pkg/api/doc_find.go b/pkg/api/doc_find.go index fe34e5f3..36b21044 100644 --- a/pkg/api/doc_find.go +++ b/pkg/api/doc_find.go @@ -29,35 +29,46 @@ type DocFindResponse struct { Docs [][]byte `json:"docs"` } -// DocFindHandler is the api handler to select rows from a given document database -// it takes three arguments -// table_name: the daument database from which to select the rows -// expr: the expression which helps in selection particular rows -// limit: the threshold of documents to return in the result +// DocFindHandler godoc +// +// @Summary Get rows from a given doc datastore +// @Description DocFindHandler is the api handler to select rows from a given document datastore +// @Tags doc +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param tableName query string true "table name" +// @Param expr query string true "expression to search for" +// @Param limit query string false "number od documents" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} DocFindResponse +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/find [get] func (h *Handler) DocFindHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("doc find: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc find: \"pod_name\" argument 
missing"}) + h.logger.Errorf("doc find: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc find: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("doc find: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc find: \"pod_name\" argument missing"}) + h.logger.Errorf("doc find: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc find: \"podName\" argument missing"}) return } - keys, ok = r.URL.Query()["table_name"] + keys, ok = r.URL.Query()["tableName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("doc find: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc find: \"table_name\" argument missing"}) + h.logger.Errorf("doc find: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc find: \"tableName\" argument missing"}) return } name := keys[0] if name == "" { - h.logger.Errorf("doc find: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc find: \"table_name\" argument missing"}) + h.logger.Errorf("doc find: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc find: \"tableName\" argument missing"}) return } diff --git a/pkg/api/doc_indexjson.go b/pkg/api/doc_indexjson.go index 12721c86..e94da955 100644 --- a/pkg/api/doc_indexjson.go +++ b/pkg/api/doc_indexjson.go @@ -20,18 +20,30 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" "resenje.org/jsonhttp" ) -// DocIndexJsonHandler is the api handler to index a json file that is present -// in a pod, in to the given document database -// it takes two arguments -// table_name: the document database in which to insert the data -// file_name: the file name of the index json with absolute path +type DocIndexRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + FileName string `json:"fileName,omitempty"` +} + +// DocIndexJsonHandler godoc +// +// @Summary Index a json file that is present in a pod, in to the given document database +// @Description DocIndexJsonHandler is the api handler to index a json file that is present in a pod, in to the given document database +// @Tags doc +// @Accept json +// @Produce json +// @Param index_request body DocIndexRequest true "index request" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/indexjson [post] func (h *Handler) DocIndexJsonHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -41,7 +53,7 @@ func (h *Handler) DocIndexJsonHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var docReq common.DocRequest + var docReq DocIndexRequest err := decoder.Decode(&docReq) if err != nil { h.logger.Errorf("doc indexjson: could not decode arguments") @@ -51,22 +63,22 @@ func (h *Handler) DocIndexJsonHandler(w http.ResponseWriter, r *http.Request) { podName := docReq.PodName if podName == "" { - h.logger.Errorf("doc indexjson: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc indexjson: \"pod_name\" argument missing"}) + h.logger.Errorf("doc indexjson: \"podName\" argument missing") + 
jsonhttp.BadRequest(w, &response{Message: "doc indexjson: \"podName\" argument missing"}) return } tableName := docReq.TableName if tableName == "" { - h.logger.Errorf("doc indexjson: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc indexjson: \"table_ame\" argument missing"}) + h.logger.Errorf("doc indexjson: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc indexjson: \"tableAme\" argument missing"}) return } podFile := docReq.FileName if podFile == "" { - h.logger.Errorf("doc indexjson: \"file_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc indexjson: \"file_name\" argument missing"}) + h.logger.Errorf("doc indexjson: \"fileName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc indexjson: \"fileName\" argument missing"}) return } diff --git a/pkg/api/doc_loadjson.go b/pkg/api/doc_loadjson.go index 4d16de00..0cd5ad0e 100644 --- a/pkg/api/doc_loadjson.go +++ b/pkg/api/doc_loadjson.go @@ -27,23 +27,33 @@ import ( "resenje.org/jsonhttp" ) -// DocLoadJsonHandler is the api handler that indexes a json file that is present -// in the local file system -// it takes two arguments -// table_name: the document database in which to insert the data -// file: the json file as a multi part file +// DocLoadJsonHandler godoc +// +// @Summary Load json file from local file system +// @Description DocLoadJsonHandler is the api handler that indexes a json file that is present in the local file system +// @Tags doc +// @Accept mpfd +// @Produce json +// @Param podName query string true "pod name" +// @Param tableName query string true "table name" +// @Param json formData file true "json to index" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/loadjson [post] func (h *Handler) DocLoadJsonHandler(w http.ResponseWriter, r *http.Request) { - podName := r.FormValue("pod_name") + podName := r.FormValue("podName") if podName == "" { - h.logger.Errorf("doc loadjson: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc loadjson: \"pod_name\" argument missing"}) + h.logger.Errorf("doc loadjson: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc loadjson: \"podName\" argument missing"}) return } - name := r.FormValue("table_name") + name := r.FormValue("tableName") if name == "" { - h.logger.Errorf("doc loadjson: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc loadjson: \"table_name\" argument missing"}) + h.logger.Errorf("doc loadjson: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc loadjson: \"tableName\" argument missing"}) return } diff --git a/pkg/api/doc_ls.go b/pkg/api/doc_ls.go index 3479e484..f7abb773 100644 --- a/pkg/api/doc_ls.go +++ b/pkg/api/doc_ls.go @@ -30,24 +30,35 @@ type DocumentDBs struct { } type documentDB struct { - Name string `json:"table_name"` + Name string `json:"tableName"` IndexedColumns []collection.SIndex `json:"indexes"` CollectionType string `json:"type"` } -// DocListHandler is the api handler which lists all the document database in a pod -// it takes no arguments +// DocListHandler godoc +// +// @Summary List all doc table +// @Description DocListHandler is the api handler which lists all the document database in a pod +// @Tags doc +// @Accept json +// @Produce json +// @Param podName query string true "pod name" 
+// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} DocumentDBs +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/ls [get] func (h *Handler) DocListHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("doc ls: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc ls: \"pod_name\" argument missing"}) + h.logger.Errorf("doc ls: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc ls: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("doc ls: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc ls: \"pod_name\" argument missing"}) + h.logger.Errorf("doc ls: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc ls: \"podName\" argument missing"}) return } diff --git a/pkg/api/doc_new.go b/pkg/api/doc_new.go index 0580b33a..b10a8119 100644 --- a/pkg/api/doc_new.go +++ b/pkg/api/doc_new.go @@ -21,18 +21,36 @@ import ( "net/http" "strings" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/collection" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) -// DocCreateHandler is the api handler to create a new document database -// it takes 2 mandatory arguments and one optional argument -// - table_name: thename of the document database -// - si: the fields and their type for crating simple indexes (ex: name=string,age=integer) -// * mutable: make the table mutable / immutable (default is true, means mutable) +type DocRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + SimpleIndex string `json:"si,omitempty"` + Mutable bool `json:"mutable,omitempty"` +} + +type SimpleDocRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` +} + +// DocCreateHandler godoc +// +// @Summary Create in doc table +// @Description DocCreateHandler is the api handler to create a new document database +// @Tags doc +// @Accept json +// @Produce json +// @Param doc_request body DocRequest true "doc table info" +// @Param Cookie header string true "cookie parameter" +// @Success 201 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/new [post] func (h *Handler) DocCreateHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -42,7 +60,7 @@ func (h *Handler) DocCreateHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var docReq common.DocRequest + var docReq DocRequest err := decoder.Decode(&docReq) if err != nil { h.logger.Errorf("doc create: could not decode arguments") @@ -52,15 +70,15 @@ func (h *Handler) DocCreateHandler(w http.ResponseWriter, r *http.Request) { podName := docReq.PodName if podName == "" { - h.logger.Errorf("doc create: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc create: \"pod_name\" argument missing"}) + h.logger.Errorf("doc create: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc create: \"podName\" argument missing"}) return } name := docReq.TableName if name == "" { - h.logger.Errorf("doc create: \"table_name\" argument missing") - 
jsonhttp.BadRequest(w, &response{Message: "doc create: \"table_name\" argument missing"}) + h.logger.Errorf("doc create: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc create: \"tableName\" argument missing"}) return } diff --git a/pkg/api/doc_open.go b/pkg/api/doc_open.go index 1e73b2c9..8c45bdc9 100644 --- a/pkg/api/doc_open.go +++ b/pkg/api/doc_open.go @@ -20,15 +20,23 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) -// DocOpenHandler is the api handler to open a document database -// it has only one argument -// table_name: the name of the document database to open +// DocOpenHandler godoc +// +// @Summary Open a doc table +// @Description DocOpenHandler is the api handler to open a document database +// @Tags doc +// @Accept json +// @Produce json +// @Param doc_request body DocRequest true "doc table info" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} DocumentDBs +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/open [post] func (h *Handler) DocOpenHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -38,7 +46,7 @@ func (h *Handler) DocOpenHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var docReq common.DocRequest + var docReq DocRequest err := decoder.Decode(&docReq) if err != nil { h.logger.Errorf("doc open: could not decode arguments") @@ -48,15 +56,15 @@ func (h *Handler) DocOpenHandler(w http.ResponseWriter, r *http.Request) { podName := docReq.PodName if podName == "" { - h.logger.Errorf("doc open: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc open: \"pod_name\" argument missing"}) + h.logger.Errorf("doc open: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc open: \"podName\" argument missing"}) return } name := docReq.TableName if name == "" { - h.logger.Errorf("doc open: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc open: \"table_name\" argument missing"}) + h.logger.Errorf("doc open: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc open: \"tableName\" argument missing"}) return } diff --git a/pkg/api/doc_put_get_del.go b/pkg/api/doc_put_get_del.go index 2098bc0a..e7d667dd 100644 --- a/pkg/api/doc_put_get_del.go +++ b/pkg/api/doc_put_get_del.go @@ -20,22 +20,41 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) +type DocPutRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + Document string `json:"doc,omitempty"` +} + +type DocDeleteRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + ID string `json:"id,omitempty"` +} + // DocGetResponse represents a single document row type DocGetResponse struct { Doc []byte `json:"doc"` } -// DocPutHandler is the api handler to add a document in to a document database -// it has two arguments -// table_name: the name of the document database -// doc: the document to add -func (h *Handler) DocPutHandler(w http.ResponseWriter, r *http.Request) { +// DocEntryPutHandler godoc +// +// @Summary Add a record in document datastore +// 
@Description DocEntryPutHandler is the api handler to add a document in to a document datastore +// @Tags doc +// @Accept json +// @Produce json +// @Param doc_entry_put_request body DocPutRequest true "doc put request" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/entry/put [post] +func (h *Handler) DocEntryPutHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { h.logger.Errorf("doc put: invalid request body type") @@ -44,7 +63,7 @@ func (h *Handler) DocPutHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var docReq common.DocRequest + var docReq DocPutRequest err := decoder.Decode(&docReq) if err != nil { h.logger.Errorf("doc put: could not decode arguments") @@ -53,15 +72,15 @@ func (h *Handler) DocPutHandler(w http.ResponseWriter, r *http.Request) { } podName := docReq.PodName if podName == "" { - h.logger.Errorf("doc put: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc put: \"pod_name\" argument missing"}) + h.logger.Errorf("doc put: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc put: \"podName\" argument missing"}) return } name := docReq.TableName if name == "" { - h.logger.Errorf("doc put: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc put: \"table_name\" argument missing"}) + h.logger.Errorf("doc put: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc put: \"tableName\" argument missing"}) return } @@ -94,34 +113,45 @@ func (h *Handler) DocPutHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, &response{Message: "added document to db"}) } -// DocGetHandler is the api handler to get a document from a document database -// it has two arguments -// table_name: the name of the document database -// id: the document id to get -func (h *Handler) DocGetHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] +// DocEntryGetHandler godoc +// +// @Summary Get a document from a document datastore +// @Description DocEntryGetHandler is the api handler to get a document from a document datastore +// @Tags doc +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param tableName query string true "table name" +// @Param id query string true "id to search for" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} DocGetResponse +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/entry/get [get] +func (h *Handler) DocEntryGetHandler(w http.ResponseWriter, r *http.Request) { + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("doc get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc get: \"pod_name\" argument missing"}) + h.logger.Errorf("doc get: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc get: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("doc get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc get: \"pod_name\" argument missing"}) + h.logger.Errorf("doc get: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc get: \"podName\" argument missing"}) return } - keys,
ok = r.URL.Query()["table_name"] + keys, ok = r.URL.Query()["tableName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("doc get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc get: \"table_name\" argument missing"}) + h.logger.Errorf("doc get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc get: \"tableName\" argument missing"}) return } name := keys[0] if name == "" { - h.logger.Errorf("doc get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc get: \"table_name\" argument missing"}) + h.logger.Errorf("doc get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc get: \"tableName\" argument missing"}) return } @@ -165,11 +195,20 @@ func (h *Handler) DocGetHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, &getResponse) } -// DocDelHandler is the api handler to delete a document from a document database -// it has two arguments -// table_name: the name of the document database -// id: the document id to delete -func (h *Handler) DocDelHandler(w http.ResponseWriter, r *http.Request) { +// DocEntryDelHandler godoc +// +// @Summary Delete a document from a document datastore +// @Description DocEntryDelHandler is the api handler to delete a document from a document datastore +// @Tags doc +// @Accept json +// @Produce json +// @Param doc_entry_delete_request body DocDeleteRequest true "doc entry delete" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/doc/entry/delete [delete] +func (h *Handler) DocEntryDelHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { h.logger.Errorf("doc del: invalid request body type") @@ -178,7 +217,7 @@ func (h *Handler) DocDelHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var docReq common.DocRequest + var docReq DocDeleteRequest err := decoder.Decode(&docReq) if err != nil { h.logger.Errorf("doc del: could not decode arguments") @@ -188,15 +227,15 @@ func (h *Handler) DocDelHandler(w http.ResponseWriter, r *http.Request) { podName := docReq.PodName if podName == "" { - h.logger.Errorf("doc del: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc del: \"pod_name\" argument missing"}) + h.logger.Errorf("doc del: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc del: \"podName\" argument missing"}) return } name := docReq.TableName if name == "" { - h.logger.Errorf("doc del: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc del: \"table_name\" argument missing"}) + h.logger.Errorf("doc del: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc del: \"tableName\" argument missing"}) return } diff --git a/pkg/api/file_delete.go b/pkg/api/file_delete.go index ee2a6698..585cceaf 100644 --- a/pkg/api/file_delete.go +++ b/pkg/api/file_delete.go @@ -20,16 +20,31 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" "github.com/fairdatasociety/fairOS-dfs/pkg/pod" "resenje.org/jsonhttp" ) -// FileDeleteHandler is the api handler to delete a file from a given pod -// it takes only one argument -// file_path: the absolute path of the
file in the pod +type FileDeleteRequest struct { + PodName string `json:"podName,omitempty"` + FilePath string `json:"filePath,omitempty"` +} + +// FileDeleteHandler godoc +// +// @Summary Delete a file +// @Description FileDeleteHandler is the api handler to delete a file from a given pod +// @Tags file +// @Accept json +// @Produce json +// @Param file_delete_request body FileDeleteRequest true "pod name and file path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 404 {object} response +// @Failure 500 {object} response +// @Router /v1/file/delete [delete] func (h *Handler) FileDeleteHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -39,7 +54,7 @@ func (h *Handler) FileDeleteHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var fsReq common.FileSystemRequest + var fsReq FileDeleteRequest err := decoder.Decode(&fsReq) if err != nil { h.logger.Errorf("file delete: could not decode arguments") @@ -49,15 +64,15 @@ func (h *Handler) FileDeleteHandler(w http.ResponseWriter, r *http.Request) { podName := fsReq.PodName if podName == "" { - h.logger.Errorf("file delete: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file delete: \"pod_name\" argument missing"}) + h.logger.Errorf("file delete: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file delete: \"podName\" argument missing"}) return } podFileWithPath := fsReq.FilePath if podFileWithPath == "" { - h.logger.Errorf("file delete: \"file_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file delete: \"file_path\" argument missing"}) + h.logger.Errorf("file delete: \"filePath\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file delete: \"filePath\" argument missing"}) return } diff --git a/pkg/api/file_download.go b/pkg/api/file_download.go index fe2d346d..9c902f09 100644 --- a/pkg/api/file_download.go +++ b/pkg/api/file_download.go @@ -27,54 +27,84 @@ import ( "resenje.org/jsonhttp" ) -// FileDownloadHandler is the api handler to download a file from a given pod -// it takes only one argument -// file_path: the absolute path of the file in the pod -func (h *Handler) FileDownloadHandler(w http.ResponseWriter, r *http.Request) { - podName := "" - podFileWithPath := "" - if r.Method == "POST" { - podName = r.FormValue("pod_name") - if podName == "" { - h.logger.Errorf("download: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "download: \"pod_name\" argument missing"}) - return - } +// FileDownloadHandlerPost godoc +// +// @Summary Download a file +// @Description FileDownloadHandlerPost is the api handler to download a file from a given pod +// @Tags file +// @Accept mpfd +// @Produce */* +// @Param podName formData string true "pod name" +// @Param filePath formData string true "file path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {array} byte +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/file/download [post] +func (h *Handler) FileDownloadHandlerPost(w http.ResponseWriter, r *http.Request) { + podName := r.FormValue("podName") + if podName == "" { + h.logger.Errorf("download: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "download: \"podName\" argument missing"}) + return + } - podFileWithPath =
r.FormValue("file_path") - if podFileWithPath == "" { - h.logger.Errorf("download: \"file_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "download: \"file_path\" argument missing"}) - return - } - } else { - keys, ok := r.URL.Query()["pod_name"] - if !ok || len(keys[0]) < 1 { - h.logger.Errorf("download \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "dir: \"pod_name\" argument missing"}) - return - } - podName = keys[0] - if podName == "" { - h.logger.Errorf("download: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "download: \"pod_name\" argument missing"}) - return - } + podFileWithPath := r.FormValue("filePath") + if podFileWithPath == "" { + h.logger.Errorf("download: \"filePath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "download: \"filePath\" argument missing"}) + return + } - keys, ok = r.URL.Query()["file_path"] - if !ok || len(keys[0]) < 1 { - h.logger.Errorf("download: \"file_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "download: \"file_path\" argument missing"}) - return - } - podFileWithPath = keys[0] - if podFileWithPath == "" { - h.logger.Errorf("download: \"file_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "download: \"file_path\" argument missing"}) - return - } + h.handleDownload(w, r, podName, podFileWithPath) + +} + +// FileDownloadHandlerGet godoc +// +// @Summary Download a file +// @Description FileDownloadHandlerGet is the api handler to download a file from a given pod +// @Tags file +// @Accept json +// @Produce */* +// @Param podName query string true "pod name" +// @Param filePath query string true "file path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {array} byte +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/file/download [get] +func (h *Handler) FileDownloadHandlerGet(w http.ResponseWriter, r *http.Request) { + keys, ok := r.URL.Query()["podName"] + if !ok || len(keys[0]) < 1 { + h.logger.Errorf("download \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "dir: \"podName\" argument missing"}) + return + } + podName := keys[0] + if podName == "" { + h.logger.Errorf("download: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "download: \"podName\" argument missing"}) + return } + keys, ok = r.URL.Query()["filePath"] + if !ok || len(keys[0]) < 1 { + h.logger.Errorf("download: \"filePath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "download: \"filePath\" argument missing"}) + return + } + podFileWithPath := keys[0] + if podFileWithPath == "" { + h.logger.Errorf("download: \"filePath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "download: \"filePath\" argument missing"}) + return + } + + h.handleDownload(w, r, podName, podFileWithPath) +} + +func (h *Handler) handleDownload(w http.ResponseWriter, r *http.Request, podName, podFileWithPath string) { // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) if err != nil { diff --git a/pkg/api/file_rename.go b/pkg/api/file_rename.go new file mode 100644 index 00000000..79d3b00d --- /dev/null +++ b/pkg/api/file_rename.go @@ -0,0 +1,113 @@ +/* +Copyright © 2020 FairOS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "encoding/json" + "net/http" + + "github.com/fairdatasociety/fairOS-dfs/cmd/common" + "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" + "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "resenje.org/jsonhttp" +) + +// FileRenameHandler godoc +// +// @Summary Rename a file +// @Description FileRenameHandler is the api handler to rename a file in a given pod +// @Tags file +// @Accept json +// @Produce json +// @Param rename_request body common.RenameRequest true "old name & new name" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 404 {object} response +// @Failure 500 {object} response +// @Router /v1/file/rename [post] +func (h *Handler) FileRenameHandler(w http.ResponseWriter, r *http.Request) { + contentType := r.Header.Get("Content-Type") + if contentType != jsonContentType { + h.logger.Errorf("file rename: invalid request body type") + jsonhttp.BadRequest(w, &response{Message: "file rename: invalid request body type"}) + return + } + + decoder := json.NewDecoder(r.Body) + var renameReq common.RenameRequest + err := decoder.Decode(&renameReq) + if err != nil { + h.logger.Errorf("file rename: could not decode arguments") + jsonhttp.BadRequest(w, &response{Message: "file rename: could not decode arguments"}) + return + } + + podName := renameReq.PodName + if podName == "" { + h.logger.Errorf("file rename: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file rename: \"podName\" argument missing"}) + return + } + + podFileWithPath := renameReq.OldPath + if podFileWithPath == "" { + h.logger.Errorf("file rename: \"oldPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file rename: \"oldPath\" argument missing"}) + return + } + + newPodFileWithPath := renameReq.NewPath + if newPodFileWithPath == "" { + h.logger.Errorf("file rename: \"newPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file rename: \"newPath\" argument missing"}) + return + } + + // get values from cookie + sessionId, err := cookie.GetSessionIdFromCookie(r) + if err != nil { + h.logger.Errorf("file rename: invalid cookie: %v", err) + jsonhttp.BadRequest(w, &response{Message: ErrInvalidCookie.Error()}) + return + } + if sessionId == "" { + h.logger.Errorf("file rename: \"cookie-id\" parameter missing in cookie") + jsonhttp.BadRequest(w, &response{Message: "file rename: \"cookie-id\" parameter missing in cookie"}) + return + } + // rename file + err = h.dfsAPI.RenameFile(podName, podFileWithPath, newPodFileWithPath, sessionId) + if err != nil { + if err == dfs.ErrPodNotOpen { + h.logger.Errorf("file rename: %v", err) + jsonhttp.BadRequest(w, &response{Message: "file rename: " + err.Error()}) + return + } + if err == pod.ErrInvalidFile { + h.logger.Errorf("file rename: %v", err) + jsonhttp.NotFound(w, &response{Message: "file rename: " + err.Error()}) + return + } + h.logger.Errorf("file rename: %v", err) + jsonhttp.InternalServerError(w, &response{Message: "file rename: " + err.Error()}) +
return + } + + jsonhttp.OK(w, &response{Message: "file renamed successfully"}) +} diff --git a/pkg/api/file_share.go b/pkg/api/file_share.go index 5cb382be..fc3fc63c 100644 --- a/pkg/api/file_share.go +++ b/pkg/api/file_share.go @@ -20,8 +20,6 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "resenje.org/jsonhttp" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" @@ -30,18 +28,33 @@ import ( // ReceiveFileResponse represents the response for receiving a file type ReceiveFileResponse struct { - FileName string `json:"file_name"` + FileName string `json:"fileName"` } // FileSharingReference represents a file reference type FileSharingReference struct { - Reference string `json:"file_sharing_reference"` + Reference string `json:"fileSharingReference"` +} + +type FileShareRequest struct { + PodName string `json:"podName,omitempty"` + FilePath string `json:"filePath,omitempty"` + Destination string `json:"destUser,omitempty"` } -// FileShareHandler is the api handler to share a file from a given pod -// it takes two arguments -// file_path: the absolute path of the file in the pod -// dest_user: the address of the destination user (this is not used now) +// FileShareHandler godoc +// +// @Summary Share a file +// @Description FileShareHandler is the api handler to share a file from a given pod +// @Tags file +// @Accept json +// @Produce json +// @Param file_share_request body FileShareRequest true "file share request params" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} FileSharingReference +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/file/share [post] func (h *Handler) FileShareHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -51,7 +64,7 @@ func (h *Handler) FileShareHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var fsReq common.FileSystemRequest + var fsReq FileShareRequest err := decoder.Decode(&fsReq) if err != nil { h.logger.Errorf("file share: could not decode arguments") @@ -61,21 +74,21 @@ func (h *Handler) FileShareHandler(w http.ResponseWriter, r *http.Request) { podName := fsReq.PodName if podName == "" { - h.logger.Errorf("file share: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file share: \"pod_name\" argument missing"}) + h.logger.Errorf("file share: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file share: \"podName\" argument missing"}) return } podFileWithPath := fsReq.FilePath if podFileWithPath == "" { - h.logger.Errorf("file share: \"pod_path_file\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file share: \"pod_path_file\" argument missing"}) + h.logger.Errorf("file share: \"filePath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file share: \"filePath\" argument missing"}) return } destinationRef := fsReq.Destination if destinationRef == "" { - h.logger.Errorf("file share: \"to\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file share: \"to\" argument missing"}) + h.logger.Errorf("file share: \"destUser\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file share: \"destUser\" argument missing"}) return } @@ -105,41 +118,52 @@ func (h *Handler) FileShareHandler(w http.ResponseWriter, r *http.Request) { }) } -// FileReceiveHandler is the api handler to receive a file in a given pod -// it takes 
two arguments -// pod_name: the name of the pod -// sharing_ref: the sharing reference of a file +// FileReceiveHandler godoc +// +// @Summary Receive a file +// @Description FileReceiveHandler is the api handler to receive a file in a given pod +// @Tags file +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param sharingRef query string true "sharing reference" +// @Param dirPath query string true "file location" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} FileSharingReference +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/file/receive [get] func (h *Handler) FileReceiveHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("file receive: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive: \"pod_name\" argument missing"}) + h.logger.Errorf("file receive: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file receive: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("file receive: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive: \"pod_name\" argument missing"}) + h.logger.Errorf("file receive: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file receive: \"podName\" argument missing"}) return } - keys, ok = r.URL.Query()["sharing_ref"] + keys, ok = r.URL.Query()["sharingRef"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("file receive: \"sharing_ref\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive: \"sharing_ref\" argument missing"}) + h.logger.Errorf("file receive: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file receive: \"sharingRef\" argument missing"}) return } sharingRefString := keys[0] if sharingRefString == "" { - h.logger.Errorf("file receive: \"ref\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive: \"ref\" argument missing"}) + h.logger.Errorf("file receive: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file receive: \"sharingRef\" argument missing"}) return } - keys1, ok1 := r.URL.Query()["dir_path"] + keys1, ok1 := r.URL.Query()["dirPath"] if !ok1 || len(keys1[0]) < 1 || keys1[0] == "" { - h.logger.Errorf("file receive: \"dir_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive: \"dir_path\" argument missing"}) + h.logger.Errorf("file receive: \"dirPath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file receive: \"dirPath\" argument missing"}) return } dir := keys1[0] @@ -177,34 +201,30 @@ func (h *Handler) FileReceiveHandler(w http.ResponseWriter, r *http.Request) { }) } -// FileReceiveInfoHandler is the api handler to receive a file info -// it takes two arguments -// pod_name: the name of the pod -// sharing_ref: the sharing reference of a file +// FileReceiveInfoHandler godoc +// +// @Summary Receive a file info +// @Description FileReceiveInfoHandler is the api handler to receive a file info +// @Tags file +// @Accept json +// @Produce json +// @Param sharingRef query string true "sharing reference" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} user.ReceiveFileInfo +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router 
/v1/file/receiveinfo [get] func (h *Handler) FileReceiveInfoHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] - if !ok || len(keys[0]) < 1 { - h.logger.Errorf("file receive info: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive info: \"pod_name\" argument missing"}) - return - } - podName := keys[0] - if podName == "" { - h.logger.Errorf("file receive info: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive info: \"pod_name\" argument missing"}) - return - } - - keys, ok = r.URL.Query()["sharing_ref"] + keys, ok := r.URL.Query()["sharingRef"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("file receive info: \"sharing_ref\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive info: \"sharing_ref\" argument missing"}) + h.logger.Errorf("file receive info: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file receive info: \"sharingRef\" argument missing"}) return } sharingRefString := keys[0] if sharingRefString == "" { - h.logger.Errorf("file receive info: \"ref\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file receive info: \"ref\" argument missing"}) + h.logger.Errorf("file receive info: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file receive info: \"sharingRef\" argument missing"}) return } @@ -228,7 +248,7 @@ func (h *Handler) FileReceiveInfoHandler(w http.ResponseWriter, r *http.Request) return } - receiveInfo, err := h.dfsAPI.ReceiveInfo(podName, sessionId, sharingRef) + receiveInfo, err := h.dfsAPI.ReceiveInfo(sessionId, sharingRef) if err != nil { h.logger.Errorf("file receive info: %v", err) jsonhttp.InternalServerError(w, &response{Message: "file receive info: " + err.Error()}) diff --git a/pkg/api/file_stat.go b/pkg/api/file_stat.go index 658a8f7f..4c3002eb 100644 --- a/pkg/api/file_stat.go +++ b/pkg/api/file_stat.go @@ -25,33 +25,44 @@ import ( "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" ) -// FileStatHandler is the api handler to get the information of a file -// it takes only one argument -// file_path: the absolute path of the file in the pod +// FileStatHandler godoc +// +// @Summary Info of a file +// @Description FileStatHandler is the api handler to get the information of a file +// @Tags file +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param filePath query string true "file path" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} file.Stats +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/file/stat [get] func (h *Handler) FileStatHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("file stat: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file stat: \"pod_name\" argument missing"}) + h.logger.Errorf("file stat: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file stat: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("file stat: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file stat: \"pod_name\" argument missing"}) + h.logger.Errorf("file stat: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file stat: \"podName\" argument missing"}) return } - 
keys, ok = r.URL.Query()["file_path"] + keys, ok = r.URL.Query()["filePath"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("file stat: \"file_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file stat: \"file_path\" argument missing"}) + h.logger.Errorf("file stat: \"filePath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file stat: \"filePath\" argument missing"}) return } podFileWithPath := keys[0] if podFileWithPath == "" { - h.logger.Errorf("file stat: \"file_path\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "file stat: \"pod_path_file\" argument missing"}) + h.logger.Errorf("file stat: \"filePath\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "file stat: \"filePath\" argument missing"}) return } diff --git a/pkg/api/file_upload.go b/pkg/api/file_upload.go index 7f7c233a..232c96cd 100644 --- a/pkg/api/file_upload.go +++ b/pkg/api/file_upload.go @@ -32,7 +32,7 @@ type UploadFileResponse struct { } type UploadResponse struct { - FileName string `json:"file_name"` + FileName string `json:"fileName"` Message string `json:"message,omitempty"` } @@ -41,31 +41,41 @@ const ( CompressionHeader = "fairOS-dfs-Compression" ) -// FileUploadHandler is the api handler to upload a file from a local file system to the dfs -// it takes three argument -// - dir_path: the directory in the pod where the file should be uploaded -// - block_size: the block size of the file -// - files: the argument name of the file to upload is attached in the multipart file upload -// Header: -// - fairOS-dfs-Compression: gzip/snappy +// FileUploadHandler godoc +// +// @Summary Upload a file +// @Description FileUploadHandler is the api handler to upload a file from a local file system to the dfs +// @Tags file +// @Accept mpfd +// @Produce json +// @Param podName formData string true "pod name" +// @Param dirPath formData string true "location" +// @Param blockSize formData string true "block size to break the file" example(4Kb, 1Mb) +// @Param files formData file true "file to upload" +// @Param fairOS-dfs-Compression header string false "compression type" example(snappy, gzip) +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/file/upload [post] func (h *Handler) FileUploadHandler(w http.ResponseWriter, r *http.Request) { - podName := r.FormValue("pod_name") + podName := r.FormValue("podName") if podName == "" { - h.logger.Errorf("file upload: \"pod_name\" argument missing") + h.logger.Errorf("file upload: \"podName\" argument missing") jsonhttp.BadRequest(w, &response{Message: "file upload: \"pod_name\" argument missing"}) return } - podPath := r.FormValue("dir_path") + podPath := r.FormValue("dirPath") if podPath == "" { - h.logger.Errorf("file upload: \"dir_path\" argument missing") + h.logger.Errorf("file upload: \"dirPath\" argument missing") jsonhttp.BadRequest(w, &response{Message: "file upload: \"dir_path\" argument missing"}) return } - blockSize := r.FormValue("block_size") + blockSize := r.FormValue("blockSize") if blockSize == "" { - h.logger.Errorf("file upload: \"block_size\" argument missing") + h.logger.Errorf("file upload: \"blockSize\" argument missing") jsonhttp.BadRequest(w, &response{Message: "file upload: \"block_size\" argument missing"}) return } @@ -103,7 +113,7 @@ func (h *Handler) FileUploadHandler(w http.ResponseWriter, r *http.Request) { return } - // get the files parameter from the multi
part + // get the files parameter from the multipart err = r.ParseMultipartForm(defaultMaxMemory) if err != nil { h.logger.Errorf("file upload: %v", err) diff --git a/pkg/api/handler.go b/pkg/api/handler.go index 83c4694f..2f06df86 100644 --- a/pkg/api/handler.go +++ b/pkg/api/handler.go @@ -44,9 +44,10 @@ func NewHandler(dataDir, beeApi, cookieDomain, postageBlockId string, whiteliste } // NewMockHandler is used for tests only -func NewMockHandler(dfsAPI *dfs.API, logger logging.Logger) *Handler { +func NewMockHandler(dfsAPI *dfs.API, logger logging.Logger, whitelistedOrigins []string) *Handler { return &Handler{ - dfsAPI: dfsAPI, - logger: logger, + dfsAPI: dfsAPI, + logger: logger, + whitelistedOrigins: whitelistedOrigins, } } diff --git a/pkg/api/kv_count.go b/pkg/api/kv_count.go index b55768eb..1b8ff801 100644 --- a/pkg/api/kv_count.go +++ b/pkg/api/kv_count.go @@ -20,8 +20,6 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) @@ -29,6 +27,20 @@ import ( // KVCountHandler is the api handler to count the number of rows in a key value table // it has ony one argument // - table_name: the name of the key value table + +// KVCountHandler godoc +// +// @Summary Count rows in a key value table +// @Description KVCountHandler is the api handler to count the number of rows in a key value table +// @Tags kv +// @Accept json +// @Produce json +// @Param kv_table_request body KVTableRequest true "kv table request" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} collection.TableKeyCount +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/count [post] func (h *Handler) KVCountHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -38,7 +50,7 @@ func (h *Handler) KVCountHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq common.KVRequest + var kvReq KVTableRequest err := decoder.Decode(&kvReq) if err != nil { h.logger.Errorf("kv count: could not decode arguments") @@ -48,15 +60,15 @@ func (h *Handler) KVCountHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv count: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv count: \"pod_name\" argument missing"}) + h.logger.Errorf("kv count: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv count: \"podName\" argument missing"}) return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv count: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv count: \"table_name\" argument missing"}) + h.logger.Errorf("kv count: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv count: \"tableName\" argument missing"}) return } diff --git a/pkg/api/kv_delete.go b/pkg/api/kv_delete.go index 63abd534..71ca1460 100644 --- a/pkg/api/kv_delete.go +++ b/pkg/api/kv_delete.go @@ -20,15 +20,23 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) -// KVDeleteHandler is the api handler to delete a key value table -// it has ony one argument -// - table_name: the name of the key value table +// KVDeleteHandler godoc +// +// @Summary Delete a key value table +// 
@Description KVDeleteHandler is the api handler to delete a key value table +// @Tags kv +// @Accept json +// @Produce json +// @Param kv_table_request body KVTableRequest true "kv table request" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/delete [delete] func (h *Handler) KVDeleteHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -38,7 +46,7 @@ func (h *Handler) KVDeleteHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq common.KVRequest + var kvReq KVTableRequest err := decoder.Decode(&kvReq) if err != nil { h.logger.Errorf("kv delete: could not decode arguments") @@ -48,15 +56,15 @@ func (h *Handler) KVDeleteHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv delete: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv delete: \"pod_name\" argument missing"}) + h.logger.Errorf("kv delete: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv delete: \"podName\" argument missing"}) return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv delete: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv delete: \"table_name\" argument missing"}) + h.logger.Errorf("kv delete: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv delete: \"tableName\" argument missing"}) return } diff --git a/pkg/api/kv_export.go b/pkg/api/kv_export.go index ae4cc155..7805c00f 100644 --- a/pkg/api/kv_export.go +++ b/pkg/api/kv_export.go @@ -6,19 +6,33 @@ import ( "net/http" "strconv" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) const MaxExportLimit = 100 -// KVExportHandler is the api handler to export from a particular key with the given prefix -// it takes four arguments, 2 mandatory and two optional -// - table_name: the name of the kv table -// - start_prefix: the prefix of the key to seek -// * end_prefix: the prefix of the end key -// * limit: the threshold for the number of keys to go when get_next is called +type KVExportRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + StartPrefix string `json:"startPrefix,omitempty"` + EndPrefix string `json:"endPrefix,omitempty"` + Limit string `json:"limit,omitempty"` +} + +// KVExportHandler godoc +// +// @Summary Export from a particular key with the given prefix +// @Description KVExportHandler is the api handler to export from a particular key with the given prefix +// @Tags kv +// @Accept json +// @Produce json +// @Param export_request body KVExportRequest true "kv export info" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} []map[string]interface{} +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/export [Post] func (h *Handler) KVExportHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -28,7 +42,7 @@ func (h *Handler) KVExportHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq common.KVRequest + var kvReq KVExportRequest err := decoder.Decode(&kvReq) if err != nil { 
h.logger.Errorf("kv export: could not decode arguments") @@ -38,15 +52,15 @@ func (h *Handler) KVExportHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv export: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv export: \"pod_name\" argument missing"}) + h.logger.Errorf("kv export: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv export: \"podName\" argument missing"}) return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv export: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv export: \"table_name\" argument missing"}) + h.logger.Errorf("kv export: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv export: \"tableName\" argument missing"}) return } diff --git a/pkg/api/kv_loadcsv.go b/pkg/api/kv_loadcsv.go index 4cfef503..ef4de554 100644 --- a/pkg/api/kv_loadcsv.go +++ b/pkg/api/kv_loadcsv.go @@ -32,18 +32,35 @@ import ( // it has two arguments // - table_name: the name of the key value table // - csv: the name of the parameter which contains the file to upload in a multipart upload + +// KVLoadCSVHandler godoc +// +// @Summary Upload a csv file in kv table +// @Description KVLoadCSVHandler is the api handler to load a csv file as key and value in a KV table +// @Tags kv +// @Accept mpfd +// @Produce json +// @Param podName formData string true "pod name" +// @Param tableName formData string true "table name" +// @Param memory formData string false "keep in memory" +// @Param csv formData file true "file to upload" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/loadcsv [Post] func (h *Handler) KVLoadCSVHandler(w http.ResponseWriter, r *http.Request) { - podName := r.FormValue("pod_name") + podName := r.FormValue("podName") if podName == "" { - h.logger.Errorf("kv loadcsv: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv loadcsv: \"pod_name\" argument missing"}) + h.logger.Errorf("kv loadcsv: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv loadcsv: \"podName\" argument missing"}) return } - name := r.FormValue("table_name") + name := r.FormValue("tableName") if name == "" { - h.logger.Errorf("kv loadcsv: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv loadcsv: \"table_name\" argument missing"}) + h.logger.Errorf("kv loadcsv: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv loadcsv: \"tableName\" argument missing"}) return } diff --git a/pkg/api/kv_ls.go b/pkg/api/kv_ls.go index 06815032..47438bd9 100644 --- a/pkg/api/kv_ls.go +++ b/pkg/api/kv_ls.go @@ -27,18 +27,29 @@ type Collections struct { Tables []Collection } type Collection struct { - Name string `json:"table_name"` + Name string `json:"tableName"` IndexedColumns []string `json:"indexes"` CollectionType string `json:"type"` } -// KVListHandler is the api handler to list all the key value tables in a pod -// it has no arguments +// KVListHandler godoc +// +// @Summary List all key value tables +// @Description KVListHandler is the api handler to list all the key value tables in a pod +// @Tags kv +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} Collections +// 
@Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/ls [get] func (h *Handler) KVListHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv ls: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv ls: \"pod_name\" argument missing"}) + h.logger.Errorf("kv ls: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv ls: \"podName\" argument missing"}) return } podName := keys[0] diff --git a/pkg/api/kv_new.go b/pkg/api/kv_new.go index 56571e06..c53474ca 100644 --- a/pkg/api/kv_new.go +++ b/pkg/api/kv_new.go @@ -20,18 +20,31 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/collection" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) -// KVCreateHandler is the api handler to create a key value table -// it takes two arguments -// - table_name: the name of the kv table -// - index_type: the name of the index (ex: string, number) +type KVTableRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + IndexType string `json:"indexType,omitempty"` +} + +// KVCreateHandler godoc +// +// @Summary Create a key value table +// @Description KVCreateHandler is the api handler to create a key value table +// @Tags kv +// @Accept json +// @Produce json +// @Param kv_table_request body KVTableRequest true "kv table request" +// @Param Cookie header string true "cookie parameter" +// @Success 201 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/new [post] func (h *Handler) KVCreateHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -41,7 +54,7 @@ func (h *Handler) KVCreateHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq common.KVRequest + var kvReq KVTableRequest err := decoder.Decode(&kvReq) if err != nil { h.logger.Errorf("kv create: could not decode arguments") @@ -51,15 +64,15 @@ func (h *Handler) KVCreateHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv create: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv create: \"pod_name\" argument missing"}) + h.logger.Errorf("kv create: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv create: \"podName\" argument missing"}) return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv create: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv create: \"table_name\" argument missing"}) + h.logger.Errorf("kv create: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv create: \"tableName\" argument missing"}) return } diff --git a/pkg/api/kv_open.go b/pkg/api/kv_open.go index c3abf7f2..7a3b6801 100644 --- a/pkg/api/kv_open.go +++ b/pkg/api/kv_open.go @@ -20,15 +20,23 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) -// KVOpenHandler is the api handler to open a key value table -// it takes only one argument -// - table_name: the name of the kv table +// KVOpenHandler godoc +// +// @Summary Open a 
key value table +// @Description KVOpenHandler is the api handler to open a key value table +// @Tags kv +// @Accept json +// @Produce json +// @Param kv_table_request body KVTableRequest true "kv table request" +// @Param Cookie header string true "cookie parameter" +// @Success 201 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/open [post] func (h *Handler) KVOpenHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -38,7 +46,7 @@ func (h *Handler) KVOpenHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq common.KVRequest + var kvReq KVTableRequest err := decoder.Decode(&kvReq) if err != nil { h.logger.Errorf("kv open: could not decode arguments") @@ -48,15 +56,15 @@ func (h *Handler) KVOpenHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv open: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv open: \"pod_name\" argument missing"}) + h.logger.Errorf("kv open: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv open: \"podName\" argument missing"}) return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv open: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv open: \"table_name\" argument missing"}) + h.logger.Errorf("kv open: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv open: \"tableName\" argument missing"}) return } diff --git a/pkg/api/kv_put_get_del.go b/pkg/api/kv_put_get_del.go index 3f744431..81f42319 100644 --- a/pkg/api/kv_put_get_del.go +++ b/pkg/api/kv_put_get_del.go @@ -21,12 +21,24 @@ import ( "fmt" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" "github.com/fairdatasociety/fairOS-dfs/pkg/collection" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" ) +type KVEntryRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} + +type KVEntryDeleteRequest struct { + PodName string `json:"podName,omitempty"` + TableName string `json:"tableName,omitempty"` + Key string `json:"key,omitempty"` +} + type KVResponse struct { Keys []string `json:"keys,omitempty"` Values []byte `json:"values"` @@ -37,11 +49,19 @@ type KVResponseRaw struct { Values string `json:"values"` } -// KVPutHandler is the api handler to insert a key and value in to the kv table -// it takes three arguments -// - table_name: the name of the kv table -// - key: the key string -// - value: the value to insert in bytes +// KVPutHandler godoc +// +// @Summary put key and value in the kv table +// @Description KVPutHandler is the api handler to put a key-value in the kv table +// @Tags kv +// @Accept json +// @Produce json +// @Param kv_entry body KVEntryRequest true "kv entry" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/entry/put [post] func (h *Handler) KVPutHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -51,7 +71,7 @@ func (h *Handler) KVPutHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq 
common.KVRequest + var kvReq KVEntryRequest err := decoder.Decode(&kvReq) if err != nil { h.logger.Errorf("kv put: could not decode arguments") @@ -61,15 +81,15 @@ func (h *Handler) KVPutHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv put: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv put: \"pod_name\" argument missing"}) + h.logger.Errorf("kv put: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv put: \"podName\" argument missing"}) return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv put: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv put: \"table_name\" argument missing"}) + h.logger.Errorf("kv put: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv put: \"tableName\" argument missing"}) return } @@ -109,42 +129,52 @@ func (h *Handler) KVPutHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, &response{Message: "key added"}) } -// KVGetHandler is the api handler to get a value from the kv table -// it takes three arguments -// - pod_name: the name of the pod -// - table_name: the name of the kv table -// - key: the key string +// KVGetHandler godoc +// +// @Summary get value from the kv table +// @Description KVGetHandler is the api handler to get a value from the kv table +// @Tags kv +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param tableName query string true "table name" +// @Param key query string true "key" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} KVResponse +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/entry/get [get] func (h *Handler) KVGetHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"pod_name\" argument missing"}) + h.logger.Errorf("kv get: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("kv get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"pod_name\" argument missing"}) + h.logger.Errorf("kv get: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"podName\" argument missing"}) return } - keys, ok = r.URL.Query()["table_name"] + keys, ok = r.URL.Query()["tableName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"table_name\" argument missing"}) + h.logger.Errorf("kv get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"tableName\" argument missing"}) return } name := keys[0] if name == "" { - h.logger.Errorf("kv get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"table_name\" argument missing"}) + h.logger.Errorf("kv get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"tableName\" argument missing"}) return } keys, ok = r.URL.Query()["key"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"sharing_ref\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: 
\"sharing_ref\" argument missing"}) + h.logger.Errorf("kv get: \"key\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"key\" argument missing"}) return } key := keys[0] @@ -190,43 +220,53 @@ func (h *Handler) KVGetHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, &resp) } -// KVGetDataHandler is the api handler to get a value from the kv table -// it takes four arguments -// - pod_name: the name of the pod -// - table_name: the name of the kv table -// - key: the key string -// - format: whether the data should be string or byte-string +// KVGetDataHandler godoc +// +// @Summary get value from the kv table +// @Description KVGetDataHandler is the api handler to get raw value from the kv table +// @Tags kv +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param tableName query string true "table name" +// @Param key query string true "key" +// @Param format query string false "format of the value" example(byte-string, string) +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} KVResponseRaw +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/entry/get-data [get] func (h *Handler) KVGetDataHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"pod_name\" argument missing"}) + h.logger.Errorf("kv get: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("kv get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"pod_name\" argument missing"}) + h.logger.Errorf("kv get: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"podName\" argument missing"}) return } - keys, ok = r.URL.Query()["table_name"] + keys, ok = r.URL.Query()["tableName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"table_name\" argument missing"}) + h.logger.Errorf("kv get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"tableName\" argument missing"}) return } name := keys[0] if name == "" { - h.logger.Errorf("kv get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"table_name\" argument missing"}) + h.logger.Errorf("kv get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"tableName\" argument missing"}) return } keys, ok = r.URL.Query()["key"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"sharing_ref\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv get: \"sharing_ref\" argument missing"}) + h.logger.Errorf("kv get: \"key\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv get: \"key\" argument missing"}) return } key := keys[0] @@ -296,10 +336,19 @@ func (h *Handler) KVGetDataHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, &resp) } -// KVDelHandler is the api handler to delete a key and value from the kv table -// it takes two arguments -// - table_name: the name of the kv table -// - key: the key string +// KVDelHandler godoc +// +// @Summary Delete key-value from the kv table +// @Description 
KVDelHandler is the api handler to delete a key and value from the kv table +// @Tags kv +// @Accept json +// @Produce json +// @Param delete_request body KVEntryDeleteRequest true "delete request" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} KVResponseRaw +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/entry/del [delete] func (h *Handler) KVDelHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -309,7 +358,7 @@ func (h *Handler) KVDelHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq common.KVRequest + var kvReq KVEntryDeleteRequest err := decoder.Decode(&kvReq) if err != nil { h.logger.Errorf("kv delete: could not decode arguments") @@ -319,15 +368,15 @@ func (h *Handler) KVDelHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv del: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, "kv del: \"pod_name\" argument missing") + h.logger.Errorf("kv del: \"podName\" argument missing") + jsonhttp.BadRequest(w, "kv del: \"podName\" argument missing") return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv del: \"table_name\" argument missing") - jsonhttp.BadRequest(w, "kv del: \"table_name\" argument missing") + h.logger.Errorf("kv del: \"tableName\" argument missing") + jsonhttp.BadRequest(w, "kv del: \"tableName\" argument missing") return } @@ -360,42 +409,52 @@ func (h *Handler) KVDelHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, "key deleted") } -// KVPresentHandler is the api handler to check if a value exists in the kv table -// it takes three arguments -// - pod_name: the name of the pod -// - table_name: the name of the kv table -// - key: the key string +// KVPresentHandler godoc +// +// @Summary Check if a value exists in the kv table +// @Description KVPresentHandler is the api handler to check if a value exists in the kv table +// @Tags kv +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param tableName query string true "table name" +// @Param key query string true "key" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/entry/present [get] func (h *Handler) KVPresentHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, "kv get: \"pod_name\" argument missing") + h.logger.Errorf("kv get: \"podName\" argument missing") + jsonhttp.BadRequest(w, "kv get: \"podName\" argument missing") return } podName := keys[0] if podName == "" { - h.logger.Errorf("kv get: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, "kv get: \"pod_name\" argument missing") + h.logger.Errorf("kv get: \"podName\" argument missing") + jsonhttp.BadRequest(w, "kv get: \"podName\" argument missing") return } - keys, ok = r.URL.Query()["table_name"] + keys, ok = r.URL.Query()["tableName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, "kv get: \"table_name\" argument missing") + h.logger.Errorf("kv get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, "kv get: \"tableName\" argument 
missing") return } name := keys[0] if name == "" { - h.logger.Errorf("kv get: \"table_name\" argument missing") - jsonhttp.BadRequest(w, "kv get: \"table_name\" argument missing") + h.logger.Errorf("kv get: \"tableName\" argument missing") + jsonhttp.BadRequest(w, "kv get: \"tableName\" argument missing") return } keys, ok = r.URL.Query()["key"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get: \"sharing_ref\" argument missing") - jsonhttp.BadRequest(w, "kv get: \"sharing_ref\" argument missing") + h.logger.Errorf("kv get: \"key\" argument missing") + jsonhttp.BadRequest(w, "kv get: \"key\" argument missing") return } key := keys[0] diff --git a/pkg/api/kv_seek_getnext.go b/pkg/api/kv_seek_getnext.go index cc84da56..9a5729cf 100644 --- a/pkg/api/kv_seek_getnext.go +++ b/pkg/api/kv_seek_getnext.go @@ -22,8 +22,6 @@ import ( "net/http" "strconv" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/collection" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "resenje.org/jsonhttp" @@ -33,12 +31,19 @@ const ( DefaultSeekLimit = "10" ) -// KVSeekHandler is the api handler to seek to a particular key with the given prefix -// it takes four arguments, 2 mandatory and two optional -// - table_name: the name of the kv table -// - start_prefix: the prefix of the key to seek -// * end_prefix: the prefix of the end key -// * limit: the threshold for the number of keys to go when get_next is called +// KVSeekHandler godoc +// +// @Summary Seek in kv table +// @Description KVSeekHandler is the api handler to seek to a particular key with the given prefix +// @Tags kv +// @Accept json +// @Produce json +// @Param export_request body KVExportRequest true "kv seek info" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/seek [Post] func (h *Handler) KVSeekHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -48,7 +53,7 @@ func (h *Handler) KVSeekHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var kvReq common.KVRequest + var kvReq KVExportRequest err := decoder.Decode(&kvReq) if err != nil { h.logger.Errorf("kv seek: could not decode arguments") @@ -58,15 +63,15 @@ func (h *Handler) KVSeekHandler(w http.ResponseWriter, r *http.Request) { podName := kvReq.PodName if podName == "" { - h.logger.Errorf("kv seek: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv seek: \"pod_name\" argument missing"}) + h.logger.Errorf("kv seek: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv seek: \"podName\" argument missing"}) return } name := kvReq.TableName if name == "" { - h.logger.Errorf("kv seek: \"table_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "kv seek: \"table_name\" argument missing"}) + h.logger.Errorf("kv seek: \"tableName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "kv seek: \"tableName\" argument missing"}) return } @@ -111,35 +116,47 @@ func (h *Handler) KVSeekHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, &response{Message: "seeked closest to the start key"}) } -// KVGetNextHandler is the api handler to get the key and value from the current seek position -// it takes only oneargument -// - table_name: the name of the kv table +// KVGetNextHandler godoc +// +// @Summary Get next value 
from last seek in kv table +// @Description KVGetNextHandler is the api handler to get the key and value from the current seek position +// @Tags kv +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param tableName query string true "table name" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} KVResponse +// @Success 204 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/kv/seek/next [Post] func (h *Handler) KVGetNextHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get_next: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, "kv get_next: \"pod_name\" argument missing") + h.logger.Errorf("kv get_next: \"podName\" argument missing") + jsonhttp.BadRequest(w, "kv get_next: \"podName\" argument missing") return } podName := keys[0] if podName == "" { - h.logger.Errorf("kv get_next: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, "kv get_next: \"pod_name\" argument missing") + h.logger.Errorf("kv get_next: \"podName\" argument missing") + jsonhttp.BadRequest(w, "kv get_next: \"podName\" argument missing") return } - keys, ok = r.URL.Query()["table_name"] + keys, ok = r.URL.Query()["tableName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("kv get_next: \"table_name\" argument missing") - jsonhttp.BadRequest(w, "kv get_next: \"table_name\" argument missing") + h.logger.Errorf("kv get_next: \"tableName\" argument missing") + jsonhttp.BadRequest(w, "kv get_next: \"tableName\" argument missing") return } name := keys[0] if name == "" { - h.logger.Errorf("kv get_next: \"table_name\" argument missing") - jsonhttp.BadRequest(w, "kv get_next: \"table_name\" argument missing") + h.logger.Errorf("kv get_next: \"tableName\" argument missing") + jsonhttp.BadRequest(w, "kv get_next: \"tableName\" argument missing") return } diff --git a/pkg/api/pod_close.go b/pkg/api/pod_close.go index 92818c52..3435d3a7 100644 --- a/pkg/api/pod_close.go +++ b/pkg/api/pod_close.go @@ -20,16 +20,29 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "resenje.org/jsonhttp" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "resenje.org/jsonhttp" ) -// PodCloseHandler is the api handler to close an open pod -// it takes no arguments +type PodNameRequest struct { + PodName string `json:"podName,omitempty"` +} + +// PodCloseHandler godoc +// +// @Summary Close pod +// @Description PodCloseHandler is the api handler to close an open pod +// @Tags pod +// @Accept json +// @Produce json +// @Param pod_request body PodNameRequest true "pod name" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/close [post] func (h *Handler) PodCloseHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -39,7 +52,7 @@ func (h *Handler) PodCloseHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var podReq common.PodRequest + var podReq PodNameRequest err := decoder.Decode(&podReq) if err != nil { h.logger.Errorf("pod close: could not decode arguments") @@ -47,7 +60,11 @@ func (h *Handler) 
PodCloseHandler(w http.ResponseWriter, r *http.Request) { return } podName := podReq.PodName - + if podName == "" { + h.logger.Errorf("pod close: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "pod close: \"podName\" argument missing"}) + return + } // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) if err != nil { diff --git a/pkg/api/pod_delete.go b/pkg/api/pod_delete.go index fdda5bbf..825c897d 100644 --- a/pkg/api/pod_delete.go +++ b/pkg/api/pod_delete.go @@ -20,17 +20,25 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "resenje.org/jsonhttp" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" ) -// PodDeleteHandler is the api handler to delete a pod -// it takes only one argument -// - pod_name: the name of the pod to delete +// PodDeleteHandler godoc +// +// @Summary Delete pod +// @Description PodDeleteHandler is the api handler to delete a pod +// @Tags pod +// @Accept json +// @Produce json +// @Param pod_request body PodNameRequest true "pod name and user password" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/delete [delete] func (h *Handler) PodDeleteHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -40,7 +48,7 @@ func (h *Handler) PodDeleteHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var podReq common.PodRequest + var podReq PodNameRequest err := decoder.Decode(&podReq) if err != nil { h.logger.Errorf("pod delete: could not decode arguments") @@ -50,15 +58,8 @@ func (h *Handler) PodDeleteHandler(w http.ResponseWriter, r *http.Request) { podName := podReq.PodName if podName == "" { - h.logger.Errorf("pod delete: \"pod_name\" parameter missing in cookie") - jsonhttp.BadRequest(w, &response{Message: "pod delete: \"pod_name\" parameter missing in cookie"}) - return - } - - password := podReq.Password - if password == "" { - h.logger.Errorf("pod delete: \"password\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "user delete: \"password\" argument missing"}) + h.logger.Errorf("pod delete: \"podName\" parameter missing in cookie") + jsonhttp.BadRequest(w, &response{Message: "pod delete: \"podName\" parameter missing in cookie"}) return } @@ -76,7 +77,7 @@ func (h *Handler) PodDeleteHandler(w http.ResponseWriter, r *http.Request) { } // delete pod - err = h.dfsAPI.DeletePod(podName, password, sessionId) + err = h.dfsAPI.DeletePod(podName, sessionId) if err != nil { if err == dfs.ErrUserNotLoggedIn { h.logger.Errorf("delete pod: %v", err) diff --git a/pkg/api/pod_ls.go b/pkg/api/pod_ls.go index 80940374..eee9d251 100644 --- a/pkg/api/pod_ls.go +++ b/pkg/api/pod_ls.go @@ -27,12 +27,22 @@ import ( ) type PodListResponse struct { - Pods []string `json:"pod_name"` - SharedPods []string `json:"shared_pod_name"` + Pods []string `json:"podName"` + SharedPods []string `json:"sharedPodName"` } -// PodListHandler is the api handler to list all pods -// it takes no arguments +// PodListHandler godoc +// +// @Summary List pods +// @Description PodListHandler is the api handler to list all pods +// @Tags pod +// @Accept json +// @Produce json +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} PodListResponse +// @Failure 400 {object} response +// 
@Failure 500 {object} response +// @Router /v1/pod/ls [get] func (h *Handler) PodListHandler(w http.ResponseWriter, r *http.Request) { // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) @@ -60,14 +70,12 @@ func (h *Handler) PodListHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.InternalServerError(w, &response{Message: "ls pod: " + err.Error()}) return } - if pods == nil { pods = make([]string, 0) } if sharedPods == nil { sharedPods = make([]string, 0) } - w.Header().Set("Content-Type", " application/json") jsonhttp.OK(w, &PodListResponse{ Pods: pods, diff --git a/pkg/api/pod_new.go b/pkg/api/pod_new.go index acef8872..61641672 100644 --- a/pkg/api/pod_new.go +++ b/pkg/api/pod_new.go @@ -20,23 +20,25 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" - - "resenje.org/jsonhttp" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "resenje.org/jsonhttp" ) -type PodCreateResponse struct { - Reference string `json:"reference"` -} - -// PodCreateHandler is the api handler to create a new pod -// it takes two arguments -// - pod_name: the name of the pod to create -// - password: the password of the user +// PodCreateHandler godoc +// +// @Summary Create pod +// @Description PodCreateHandler is the api handler to create a new pod +// @Tags pod +// @Accept json +// @Produce json +// @Param pod_request body PodNameRequest true "pod name and user password" +// @Param Cookie header string true "cookie parameter" +// @Success 201 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/new [post] func (h *Handler) PodCreateHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -46,7 +48,7 @@ func (h *Handler) PodCreateHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var podReq common.PodRequest + var podReq PodNameRequest err := decoder.Decode(&podReq) if err != nil { h.logger.Errorf("pod new: could not decode arguments") @@ -55,15 +57,9 @@ func (h *Handler) PodCreateHandler(w http.ResponseWriter, r *http.Request) { } pod := podReq.PodName - password := podReq.Password - if password == "" { - h.logger.Errorf("pod new: \"password\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "pod new: \"password\" argument missing"}) - return - } if pod == "" { - h.logger.Errorf("pod new: \"pod\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "pod new: \"pod\" argument missing"}) + h.logger.Errorf("pod new: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "pod new: \"podName\" argument missing"}) return } @@ -81,7 +77,7 @@ func (h *Handler) PodCreateHandler(w http.ResponseWriter, r *http.Request) { } // create pod - _, err = h.dfsAPI.CreatePod(pod, password, sessionId) + _, err = h.dfsAPI.CreatePod(pod, sessionId) if err != nil { if err == dfs.ErrUserNotLoggedIn || err == p.ErrInvalidPodName || diff --git a/pkg/api/pod_open.go b/pkg/api/pod_open.go index 728c1ba2..c6d6b3d2 100644 --- a/pkg/api/pod_open.go +++ b/pkg/api/pod_open.go @@ -20,7 +20,6 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" "github.com/fairdatasociety/fairOS-dfs/pkg/dfs" p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" @@ -31,10 +30,19 @@ type 
PodOpenResponse struct { Reference string `json:"reference"` } -// PodOpenHandler is the api handler to open a pod -// it takes two arguments -// - pod_name: the name of the pod to open -// - password: the password of the user +// PodOpenHandler godoc +// +// @Summary Open pod +// @Description PodOpenHandler is the api handler to open pod +// @Tags pod +// @Accept json +// @Produce json +// @Param pod_request body PodNameRequest true "pod name and user password" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/open [post] func (h *Handler) PodOpenHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -44,7 +52,7 @@ func (h *Handler) PodOpenHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var podReq common.PodRequest + var podReq PodNameRequest err := decoder.Decode(&podReq) if err != nil { h.logger.Errorf("pod open: could not decode arguments") @@ -52,10 +60,77 @@ func (h *Handler) PodOpenHandler(w http.ResponseWriter, r *http.Request) { return } pod := podReq.PodName + if pod == "" { + h.logger.Errorf("pod open: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "pod open: \"podName\" argument missing"}) + return + } + + // get values from cookie + sessionId, err := cookie.GetSessionIdFromCookie(r) + if err != nil { + h.logger.Errorf("pod open: invalid cookie: %v", err) + jsonhttp.BadRequest(w, &response{Message: ErrInvalidCookie.Error()}) + return + } + if sessionId == "" { + h.logger.Errorf("pod open: \"cookie-id\" parameter missing in cookie") + jsonhttp.BadRequest(w, &response{Message: "pod open: \"cookie-id\" parameter missing in cookie"}) + return + } + + // open pod + _, err = h.dfsAPI.OpenPod(pod, sessionId) + if err != nil { + if err == dfs.ErrUserNotLoggedIn || + err == p.ErrInvalidPodName { + h.logger.Errorf("pod open: %v", err) + jsonhttp.NotFound(w, &response{Message: "pod open: " + err.Error()}) + return + } + h.logger.Errorf("pod open: %v", err) + jsonhttp.InternalServerError(w, &response{Message: "pod open: " + err.Error()}) + return + } + + jsonhttp.OK(w, &response{Message: "pod opened successfully"}) +} + +// PodOpenAsyncHandler godoc +// +// @Summary Open pod +// @Description PodOpenAsyncHandler is the api handler to open pod asynchronously +// @Tags pod +// @Accept json +// @Produce json +// @Param pod_request body PodNameRequest true "pod name and user password" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/open-async [post] +func (h *Handler) PodOpenAsyncHandler(w http.ResponseWriter, r *http.Request) { + contentType := r.Header.Get("Content-Type") + if contentType != jsonContentType { + h.logger.Errorf("pod open: invalid request body type") + jsonhttp.BadRequest(w, &response{Message: "pod open: invalid request body type"}) + return + } - // password will be empty in case of opening a shared pod - // so allow even if it is not set - password := podReq.Password + decoder := json.NewDecoder(r.Body) + var podReq PodNameRequest + err := decoder.Decode(&podReq) + if err != nil { + h.logger.Errorf("pod open: could not decode arguments") + jsonhttp.BadRequest(w, &response{Message: "pod open: could not decode arguments"}) + return + } + pod := podReq.PodName + if pod == "" 
{ + h.logger.Errorf("pod open: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "pod open: \"podName\" argument missing"}) + return + } // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) @@ -71,7 +146,7 @@ func (h *Handler) PodOpenHandler(w http.ResponseWriter, r *http.Request) { } // open pod - _, err = h.dfsAPI.OpenPod(pod, password, sessionId) + _, err = h.dfsAPI.OpenPodAsync(r.Context(), pod, sessionId) if err != nil { if err == dfs.ErrUserNotLoggedIn || err == p.ErrInvalidPodName { diff --git a/pkg/api/pod_present.go b/pkg/api/pod_present.go index 6afd9fa6..5b467b06 100644 --- a/pkg/api/pod_present.go +++ b/pkg/api/pod_present.go @@ -8,19 +8,30 @@ import ( "resenje.org/jsonhttp" ) -// PodPresentHandler is the api handler to check if a pod is present -// it takes pod_name as query parameter +// PodPresentHandler godoc +// +// @Summary Is pod present +// @Description PodPresentHandler is the api handler to check if a pod is present +// @Tags pod +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/present [get] func (h *Handler) PodPresentHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("doc ls: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc ls: \"pod_name\" argument missing"}) + h.logger.Errorf("doc ls: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc ls: \"podName\" argument missing"}) return } podName := keys[0] if podName == "" { - h.logger.Errorf("doc ls: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "doc ls: \"pod_name\" argument missing"}) + h.logger.Errorf("doc ls: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "doc ls: \"podName\" argument missing"}) return } diff --git a/pkg/api/pod_share.go b/pkg/api/pod_share.go index 05dc9bb1..3ceb47e3 100644 --- a/pkg/api/pod_share.go +++ b/pkg/api/pod_share.go @@ -31,13 +31,22 @@ import ( ) type PodSharingReference struct { - Reference string `json:"pod_sharing_reference"` + Reference string `json:"podSharingReference"` } -// PodShareHandler is the api handler to share a pod to the public -// it takes two arguments -// - pod_name: the name of the pod to share -// - password: the password of the user +// PodShareHandler godoc +// +// @Summary Share pod +// @Description PodShareHandler is the api handler to share a pod to the public +// @Tags pod +// @Accept json +// @Produce json +// @Param pod_request body common.PodShareRequest true "pod name and user password" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} PodSharingReference +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/share [post] func (h *Handler) PodShareHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -47,7 +56,7 @@ func (h *Handler) PodShareHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var podReq common.PodRequest + var podReq common.PodShareRequest err := decoder.Decode(&podReq) if err != nil { h.logger.Errorf("pod share: could not decode arguments") @@ -57,8 +66,8 @@ func 
(h *Handler) PodShareHandler(w http.ResponseWriter, r *http.Request) { pod := podReq.PodName if pod == "" { - h.logger.Errorf("pod share: \"pod\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "pod share: \"pod\" argument missing"}) + h.logger.Errorf("pod share: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "pod share: \"podName\" argument missing"}) return } @@ -67,13 +76,6 @@ func (h *Handler) PodShareHandler(w http.ResponseWriter, r *http.Request) { sharedPodName = pod } - password := podReq.Password - if password == "" { - h.logger.Errorf("pod share: \"password\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "pod share: \"password\" argument missing"}) - return - } - // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) if err != nil { @@ -88,7 +90,7 @@ func (h *Handler) PodShareHandler(w http.ResponseWriter, r *http.Request) { } // fetch pod stat - sharingRef, err := h.dfsAPI.PodShare(pod, sharedPodName, password, sessionId) + sharingRef, err := h.dfsAPI.PodShare(pod, sharedPodName, sessionId) if err != nil { if err == dfs.ErrUserNotLoggedIn || err == p.ErrInvalidPodName { @@ -107,18 +109,31 @@ func (h *Handler) PodShareHandler(w http.ResponseWriter, r *http.Request) { }) } +// PodReceiveInfoHandler godoc +// +// @Summary Receive shared pod info +// @Description PodReceiveInfoHandler is the api handler to receive shared pod info from shared reference +// @Tags pod +// @Accept json +// @Produce json +// @Param sharingRef query string true "pod sharing reference" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} pod.ShareInfo +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/receiveinfo [get] func (h *Handler) PodReceiveInfoHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["sharing_ref"] + keys, ok := r.URL.Query()["sharingRef"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("pod receive info: \"sharing_ref\" argument missing") - jsonhttp.BadRequest(w, "pod receive info: \"sharing_ref\" argument missing") + h.logger.Errorf("pod receive info: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, "pod receive info: \"sharingRef\" argument missing") return } sharingRefString := keys[0] if sharingRefString == "" { - h.logger.Errorf("pod receive info: \"ref\" argument missing") - jsonhttp.BadRequest(w, "pod receive info: \"ref\" argument missing") + h.logger.Errorf("pod receive info: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, "pod receive info: \"sharingRef\" argument missing") return } @@ -153,23 +168,37 @@ func (h *Handler) PodReceiveInfoHandler(w http.ResponseWriter, r *http.Request) jsonhttp.OK(w, shareInfo) } +// PodReceiveHandler godoc +// +// @Summary Receive shared pod +// @Description PodReceiveHandler is the api handler to receive shared pod from shared reference +// @Tags pod +// @Accept json +// @Produce json +// @Param sharingRef query string true "pod sharing reference" +// @Param sharedPodName query string false "pod name to be saved as" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/receive [get] func (h *Handler) PodReceiveHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["sharing_ref"] + keys, ok := r.URL.Query()["sharingRef"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("pod receive: \"sharing_ref\" argument 
missing") - jsonhttp.BadRequest(w, "pod receive: \"sharing_ref\" argument missing") + h.logger.Errorf("pod receive: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, "pod receive: \"sharingRef\" argument missing") return } sharingRefString := keys[0] if sharingRefString == "" { - h.logger.Errorf("pod receive: \"ref\" argument missing") - jsonhttp.BadRequest(w, "pod receive: \"ref\" argument missing") + h.logger.Errorf("pod receive: \"sharingRef\" argument missing") + jsonhttp.BadRequest(w, "pod receive: \"sharingRef\" argument missing") return } sharedPodName := "" - keys, ok = r.URL.Query()["shared_pod_name"] + keys, ok = r.URL.Query()["sharedPodName"] if ok && len(keys[0]) == 1 { sharedPodName = keys[0] } @@ -202,5 +231,5 @@ func (h *Handler) PodReceiveHandler(w http.ResponseWriter, r *http.Request) { } addedStr := fmt.Sprintf("public pod %q, added as shared pod", pi.GetPodName()) - jsonhttp.OK(w, addedStr) + jsonhttp.OK(w, &response{Message: addedStr}) } diff --git a/pkg/api/pod_stat.go b/pkg/api/pod_stat.go index cd6618b1..e3dfb05b 100644 --- a/pkg/api/pod_stat.go +++ b/pkg/api/pod_stat.go @@ -27,25 +27,35 @@ import ( ) type PodStatResponse struct { - PodName string `json:"pod_name"` + PodName string `json:"podName"` PodAddress string `json:"address"` } -// PodStatHandler is the api handler to get information about a pod -// it takes only one argument -// - pod_name: the name of the pod to get the info +// PodStatHandler godoc +// +// @Summary Stats for pod +// @Description PodStatHandler is the api handler get information about a pod +// @Tags pod +// @Accept json +// @Produce json +// @Param podName query string true "pod name" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} PodStatResponse +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/pod/stat [get] func (h *Handler) PodStatHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["pod_name"] + keys, ok := r.URL.Query()["podName"] if !ok || len(keys[0]) < 1 { - h.logger.Errorf("pod stat: \"pod_name\" argument missing") + h.logger.Errorf("pod stat: \"podName\" argument missing") jsonhttp.BadRequest(w, &response{Message: "pod stat: \"pod_name\" argument missing"}) return } pod := keys[0] if pod == "" { - h.logger.Errorf("pod stat: \"pod_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "pod stat: \"pod_name\" argument missing"}) + h.logger.Errorf("pod stat: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "pod stat: \"podName\" argument missing"}) return } // get values from cookie diff --git a/pkg/api/pod_sync.go b/pkg/api/pod_sync.go index 3df06dfc..69c416fd 100644 --- a/pkg/api/pod_sync.go +++ b/pkg/api/pod_sync.go @@ -20,7 +20,6 @@ import ( "encoding/json" "net/http" - "github.com/fairdatasociety/fairOS-dfs/cmd/common" "resenje.org/jsonhttp" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" @@ -28,8 +27,19 @@ import ( p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" ) -// PodSyncHandler is the api handler to sync a pod's contents from the Swarm network -// it takes no arguments +// PodSyncHandler godoc +// +// @Summary Sync pod +// @Description PodSyncHandler is the api handler to sync a pod's content +// @Tags pod +// @Accept json +// @Produce json +// @Param pod_request body PodNameRequest true "pod name" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router 
/v1/pod/sync [post] func (h *Handler) PodSyncHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -39,7 +49,7 @@ func (h *Handler) PodSyncHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var podReq common.PodRequest + var podReq PodNameRequest err := decoder.Decode(&podReq) if err != nil { h.logger.Errorf("pod sync: could not decode arguments") @@ -47,7 +57,11 @@ func (h *Handler) PodSyncHandler(w http.ResponseWriter, r *http.Request) { return } podName := podReq.PodName - + if podName == "" { + h.logger.Errorf("pod sync: \"podName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "pod sync: \"podName\" argument missing"}) + return + } // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) if err != nil { @@ -60,7 +74,6 @@ func (h *Handler) PodSyncHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.BadRequest(w, &response{Message: "pod sync: \"cookie-id\" parameter missing in cookie"}) return } - // fetch pods and list them err = h.dfsAPI.SyncPod(podName, sessionId) if err != nil { diff --git a/pkg/api/user_del.go b/pkg/api/user_del.go index 1a1274be..531d1dc4 100644 --- a/pkg/api/user_del.go +++ b/pkg/api/user_del.go @@ -26,9 +26,11 @@ import ( "resenje.org/jsonhttp" ) -// UserDeleteHandler is the api handler to delete a user -// it takes only one argument -// - password: the password of the user +// UserDeleteHandler godoc +// +// @Tags user +// @Deprecated +// @Router /v1/user/delete [post] func (h *Handler) UserDeleteHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -38,7 +40,7 @@ func (h *Handler) UserDeleteHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var userReq common.UserRequest + var userReq common.UserSignupRequest err := decoder.Decode(&userReq) if err != nil { h.logger.Errorf("user signup: could not decode arguments") @@ -65,45 +67,39 @@ func (h *Handler) UserDeleteHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.BadRequest(w, &response{Message: "user delete: \"cookie-id\" parameter missing in cookie"}) return } + jsonhttp.BadRequest(w, &response{Message: "user delete: deprecated"}) +} - // delete user - err = h.dfsAPI.DeleteUser(password, sessionId) - if err != nil { - if err == u.ErrInvalidUserName || - err == u.ErrInvalidPassword || - err == u.ErrUserNotLoggedIn { - h.logger.Errorf("user delete: %v", err) - jsonhttp.BadRequest(w, &response{Message: "user delete: " + err.Error()}) - return - } - h.logger.Errorf("user delete: %v", err) - jsonhttp.InternalServerError(w, &response{Message: "user delete: " + err.Error()}) - return - } - - // clear cookie - cookie.ClearSession(w) - - jsonhttp.OK(w, &response{Message: "user deleted successfully"}) +type UserDeleteRequest struct { + Password string `json:"password,omitempty"` } -// UserDeleteV2Handler is the api handler to delete a user -// it takes only one argument -// - password: the password of the user +// UserDeleteV2Handler godoc +// +// @Summary Delete user for ENS based authentication +// @Description deletes user info from swarm +// @Tags user +// @Produce json +// @Param UserDeleteRequest body UserDeleteRequest true "user delete request" +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v2/user/delete 
[delete] func (h *Handler) UserDeleteV2Handler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { - h.logger.Errorf("user signup: invalid request body type") - jsonhttp.BadRequest(w, "user signup: invalid request body type") + h.logger.Errorf("user delete: invalid request body type") + jsonhttp.BadRequest(w, "user delete: invalid request body type") return } decoder := json.NewDecoder(r.Body) - var userReq common.UserRequest + var userReq UserDeleteRequest err := decoder.Decode(&userReq) if err != nil { - h.logger.Errorf("user signup: could not decode arguments") - jsonhttp.BadRequest(w, "user signup: could not decode arguments") + h.logger.Errorf("user delete: could not decode arguments") + jsonhttp.BadRequest(w, "user delete: could not decode arguments") return } @@ -134,16 +130,16 @@ func (h *Handler) UserDeleteV2Handler(w http.ResponseWriter, r *http.Request) { err == u.ErrInvalidPassword || err == u.ErrUserNotLoggedIn { h.logger.Errorf("user delete: %v", err) - jsonhttp.BadRequest(w, "user delete: "+err.Error()) + jsonhttp.BadRequest(w, &response{Message: "user delete: " + err.Error()}) return } h.logger.Errorf("user delete: %v", err) - jsonhttp.InternalServerError(w, "user delete: "+err.Error()) + jsonhttp.InternalServerError(w, &response{Message: "user delete: " + err.Error()}) return } // clear cookie cookie.ClearSession(w) - jsonhttp.OK(w, "user deleted successfully") + jsonhttp.OK(w, &response{Message: "user deleted successfully"}) } diff --git a/pkg/api/user_export.go b/pkg/api/user_export.go index a3a22083..de31bbf4 100644 --- a/pkg/api/user_export.go +++ b/pkg/api/user_export.go @@ -21,12 +21,15 @@ import ( ) type UserExportResponse struct { - Name string `json:"user_name"` + Name string `json:"userName"` Address string `json:"address"` } -// ExportUserHandler is the api handler to export a user information -// it takes no arguments +// ExportUserHandler godoc +// +// @Tags user +// @Deprecated +// @Router /v1/user/export [post] func (h *Handler) ExportUserHandler(w http.ResponseWriter, r *http.Request) { // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) @@ -40,16 +43,5 @@ func (h *Handler) ExportUserHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.BadRequest(w, &response{Message: "user export: \"cookie-id\" parameter missing in cookie"}) return } - - name, address, err := h.dfsAPI.ExportUser(sessionId) - if err != nil { - h.logger.Errorf("user export: %v", err) - jsonhttp.InternalServerError(w, &response{Message: "user export: " + err.Error()}) - return - } - - jsonhttp.OK(w, &UserExportResponse{ - Name: name, - Address: address, - }) + jsonhttp.BadRequest(w, &response{Message: "user export: deprecated"}) } diff --git a/pkg/api/user_islogin.go b/pkg/api/user_islogin.go index b5f8622d..8a131783 100644 --- a/pkg/api/user_islogin.go +++ b/pkg/api/user_islogin.go @@ -26,11 +26,19 @@ type LoginStatus struct { LoggedIn bool `json:"loggedin"` } -// IsUserLoggedInHandler is the api handler to check if a user is logged in or not -// it takes one argument -// -user_name: the user name to check if it logged in or not +// IsUserLoggedInHandler godoc +// +// @Summary Is user logged-in +// @Description Check if the given user is logged-in +// @Tags user +// @Accept json +// @Produce json +// @Param userName query string true "user name" +// @Success 200 {object} LoginStatus +// @Failure 400 {object} response +// @Router /v1/user/isloggedin [get] func (h *Handler) 
IsUserLoggedInHandler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["user_name"] + keys, ok := r.URL.Query()["userName"] if !ok || len(keys[0]) < 1 { h.logger.Errorf("user isloggedin: \"user_name\" argument missing") jsonhttp.BadRequest(w, &response{Message: "user isloggedin: \"user_name\" argument missing"}) @@ -39,8 +47,8 @@ func (h *Handler) IsUserLoggedInHandler(w http.ResponseWriter, r *http.Request) user := keys[0] if user == "" { - h.logger.Errorf("user isloggedin: \"user\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "user isloggedin: \"user\" argument missing"}) + h.logger.Errorf("user isloggedin: \"user_name\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "user isloggedin: \"user_name\" argument missing"}) return } diff --git a/pkg/api/user_login.go b/pkg/api/user_login.go index d7d4da92..fce2a7e4 100644 --- a/pkg/api/user_login.go +++ b/pkg/api/user_login.go @@ -18,6 +18,7 @@ package api import ( "encoding/json" + "errors" "net/http" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" @@ -27,10 +28,27 @@ import ( "resenje.org/jsonhttp" ) -// UserLoginV2Handler is the api handler to login a user -// it takes two arguments -// - user_name: the name of the user to login -// - password: the password of the user +type UserLoginResponse struct { + Address string `json:"address"` + NameHash string `json:"nameHash,omitempty"` + PublicKey string `json:"publicKey,omitempty"` + Message string `json:"message,omitempty"` +} + +// UserLoginV2Handler godoc +// +// @Summary Login User +// @Description login user with the new ENS based authentication +// @Tags user +// @Accept json +// @Produce json +// @Param user_request body common.UserLoginRequest true "user name" +// @Success 200 {object} UserLoginResponse +// @Failure 400 {object} response +// @Failure 404 {object} response +// @Failure 500 {object} response +// @Header 200 {string} Set-Cookie "fairos-dfs session" +// @Router /v2/user/login [post] func (h *Handler) UserLoginV2Handler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -40,7 +58,7 @@ func (h *Handler) UserLoginV2Handler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var userReq common.UserRequest + var userReq common.UserLoginRequest err := decoder.Decode(&userReq) if err != nil { h.logger.Errorf("user login: could not decode arguments") @@ -51,8 +69,8 @@ func (h *Handler) UserLoginV2Handler(w http.ResponseWriter, r *http.Request) { user := userReq.UserName password := userReq.Password if user == "" { - h.logger.Errorf("user login: \"user_name\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "user login: \"user_name\" argument missing"}) + h.logger.Errorf("user login: \"userName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "user login: \"userName\" argument missing"}) return } if password == "" { @@ -64,6 +82,11 @@ func (h *Handler) UserLoginV2Handler(w http.ResponseWriter, r *http.Request) { // login user ui, nameHash, publicKey, err := h.dfsAPI.LoginUserV2(user, password, "") if err != nil { + if errors.Is(err, u.ErrUserNameNotFound) { + h.logger.Errorf("user login: %v", err) + jsonhttp.NotFound(w, &response{Message: "user login: " + err.Error()}) + return + } if err == u.ErrUserAlreadyLoggedIn || err == u.ErrInvalidUserName || err == u.ErrInvalidPassword { diff --git a/pkg/api/user_login_v1.go b/pkg/api/user_login_v1.go index 97d576c4..d69890f6 100644 --- 
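The renamed camelCase fields of common.UserLoginRequest can be exercised against the v2 login route documented above roughly like this (placeholder credentials, default port assumed):

```
curl -X POST http://localhost:9090/v2/user/login \
  -H "Content-Type: application/json" \
  -d '{"userName": "alice", "password": "secret"}'
```

On success the handler sets the fairos-dfs session cookie and returns the UserLoginResponse fields (address, nameHash, publicKey).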
a/pkg/api/user_login_v1.go +++ b/pkg/api/user_login_v1.go @@ -21,15 +21,15 @@ import ( "net/http" "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" - u "github.com/fairdatasociety/fairOS-dfs/pkg/user" "resenje.org/jsonhttp" ) -// UserLoginHandler is the api handler to login a user -// it takes two arguments -// - user_name: the name of the user to login -// - password: the password of the user +// UserLoginHandler godoc +// +// @Tags user +// +// @Deprecated +// @Router /v1/user/login [post] func (h *Handler) UserLoginHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -39,7 +39,7 @@ func (h *Handler) UserLoginHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var userReq common.UserRequest + var userReq common.UserSignupRequest err := decoder.Decode(&userReq) if err != nil { h.logger.Errorf("user login: could not decode arguments") @@ -59,28 +59,5 @@ func (h *Handler) UserLoginHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.BadRequest(w, &response{Message: "user login: \"password\" argument missing"}) return } - - // login user - ui, err := h.dfsAPI.LoginUser(user, password, "") - if err != nil { - if err == u.ErrUserAlreadyLoggedIn || - err == u.ErrInvalidUserName || - err == u.ErrInvalidPassword { - h.logger.Errorf("user login: %v", err) - jsonhttp.BadRequest(w, &response{Message: "user login: " + err.Error()}) - return - } - h.logger.Errorf("user login: %v", err) - jsonhttp.InternalServerError(w, &response{Message: "user login: " + err.Error()}) - return - } - - err = cookie.SetSession(ui.GetSessionId(), w, h.cookieDomain) - if err != nil { - h.logger.Errorf("user login: %v", err) - jsonhttp.InternalServerError(w, &response{Message: "user login: " + err.Error()}) - return - } - - jsonhttp.OK(w, &response{Message: "user logged-in successfully"}) + jsonhttp.BadRequest(w, &response{Message: "user login: deprecated"}) } diff --git a/pkg/api/user_logout.go b/pkg/api/user_logout.go index dff96d47..da007d08 100644 --- a/pkg/api/user_logout.go +++ b/pkg/api/user_logout.go @@ -24,8 +24,17 @@ import ( "resenje.org/jsonhttp" ) -// UserLogoutHandler is the api handler to logout a user -// it takes no arguments +// UserLogoutHandler godoc +// +// @Summary Logout +// @Description logs-out user +// @Tags user +// @Accept json +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} response +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/user/logout [post] func (h *Handler) UserLogoutHandler(w http.ResponseWriter, r *http.Request) { // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) diff --git a/pkg/api/user_migrate.go b/pkg/api/user_migrate.go index 3585751f..ba2c1303 100644 --- a/pkg/api/user_migrate.go +++ b/pkg/api/user_migrate.go @@ -6,7 +6,6 @@ import ( "github.com/fairdatasociety/fairOS-dfs/cmd/common" "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" - u "github.com/fairdatasociety/fairOS-dfs/pkg/user" "resenje.org/jsonhttp" ) @@ -22,7 +21,7 @@ func (h *Handler) UserMigrateHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var userReq common.UserRequest + var userReq common.UserSignupRequest err := decoder.Decode(&userReq) if err != nil { h.logger.Errorf("user migrate: could not decode arguments") @@ -50,24 +49,5 @@ func (h *Handler) UserMigrateHandler(w http.ResponseWriter, r 
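For completeness, the logout route annotated above only needs the session cookie; a minimal sketch with a placeholder cookie value:

```
curl -X POST http://localhost:9090/v1/user/logout \
  -H "Cookie: <session cookie from login>"
```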
*http.Request) { return } - // migrate user - username := userReq.UserName - err = h.dfsAPI.MigrateUser(username, password, sessionId) - if err != nil { - if err == u.ErrInvalidUserName || - err == u.ErrInvalidPassword || - err == u.ErrUserNotLoggedIn { - h.logger.Errorf("user migrate: %v", err) - jsonhttp.BadRequest(w, &response{Message: "user migrate: " + err.Error()}) - return - } - h.logger.Errorf("user migrate: %v", err) - jsonhttp.InternalServerError(w, &response{Message: "user migrate: " + err.Error()}) - return - } - - // clear cookie - cookie.ClearSession(w) - - jsonhttp.OK(w, &response{Message: "user migrated successfully"}) + jsonhttp.BadRequest(w, &response{Message: "user migrate: deprecated"}) } diff --git a/pkg/api/user_present.go b/pkg/api/user_present.go index b26345dc..59ff6b4f 100644 --- a/pkg/api/user_present.go +++ b/pkg/api/user_present.go @@ -26,9 +26,11 @@ type PresentResponse struct { Present bool `json:"present"` } -// UserPresentHandler is the api handler to check if a gien user name is present -// it takes only one argument -// - user_name: the name of the user to check +// UserPresentHandler godoc +// +// @Tags user +// @Deprecated +// @Router /v1/user/present [get] func (h *Handler) UserPresentHandler(w http.ResponseWriter, r *http.Request) { keys, ok := r.URL.Query()["user_name"] if !ok || len(keys[0]) < 1 { @@ -43,25 +45,21 @@ func (h *Handler) UserPresentHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.BadRequest(w, &response{Message: "user present: \"user\" argument missing"}) return } - - w.Header().Set("Content-Type", " application/json") - // check if user is present - if h.dfsAPI.IsUserNameAvailable(user) { - jsonhttp.OK(w, &PresentResponse{ - Present: true, - }) - } else { - jsonhttp.OK(w, &PresentResponse{ - Present: false, - }) - } + jsonhttp.BadRequest(w, &response{Message: "user present: deprecated"}) } -// UserPresentV2Handler is the api handler to check if a gien user name is present -// it takes only one argument -// - user_name: the name of the user to check +// UserPresentV2Handler godoc +// +// @Summary Check if user is present +// @Description checks if the new user is present in the new ENS based authentication +// @Tags user +// @Produce json +// @Param userName query string true "user name" +// @Success 200 {object} PresentResponse +// @Failure 400 {object} response +// @Router /v2/user/present [get] func (h *Handler) UserPresentV2Handler(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["user_name"] + keys, ok := r.URL.Query()["userName"] if !ok || len(keys[0]) < 1 { h.logger.Errorf("user present: \"user_name\" argument missing") jsonhttp.BadRequest(w, &response{Message: "user present: \"user_name\" argument missing"}) @@ -70,8 +68,8 @@ func (h *Handler) UserPresentV2Handler(w http.ResponseWriter, r *http.Request) { user := keys[0] if user == "" { - h.logger.Errorf("user present: \"user\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "user present: \"user\" argument missing"}) + h.logger.Errorf("user present: \"user_name\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "user present: \"user_name\" argument missing"}) return } diff --git a/pkg/api/user_signup.go b/pkg/api/user_signup.go index 7226bca0..436a2aa4 100644 --- a/pkg/api/user_signup.go +++ b/pkg/api/user_signup.go @@ -33,17 +33,25 @@ var ( type UserSignupResponse struct { Address string `json:"address"` - Mnemonic string `json:"mnemonic,omitempty"` - NameHash string `json:"name_hash,omitempty"` - PublicKey string 
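The v2 presence check annotated above is a plain GET with the renamed userName query parameter; for example (placeholder name, default port assumed):

```
curl "http://localhost:9090/v2/user/present?userName=alice"
```

The reply is a PresentResponse object such as {"present": true}.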
`json:"public_key,omitempty"` + NameHash string `json:"nameHash,omitempty"` + PublicKey string `json:"publicKey,omitempty"` Message string `json:"message,omitempty"` + Mnemonic string `json:"mnemonic,omitempty"` } -// UserSignupV2Handler is the api handler to create new user -// it takes two mandatory arguments and one optional argument -// - user_name: the name of the user to create -// - password: the password of the user -// * mnemonic: a 12 word mnemonic to use to create the hd wallet of the user +// UserSignupV2Handler godoc +// +// @Summary Register New User +// @Description registers new user with the new ENS based authentication +// @Tags user +// @Accept json +// @Produce json +// @Param user_request body common.UserSignupRequest true "user name" +// @Success 201 {object} UserSignupResponse +// @Failure 400 {object} response +// @Failure 402 {object} UserSignupResponse +// @Failure 500 {object} response +// @Router /v2/user/signup [post] func (h *Handler) UserSignupV2Handler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -53,7 +61,7 @@ func (h *Handler) UserSignupV2Handler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var userReq common.UserRequest + var userReq common.UserSignupRequest err := decoder.Decode(&userReq) if err != nil { h.logger.Errorf("user signup: could not decode arguments") @@ -65,8 +73,8 @@ func (h *Handler) UserSignupV2Handler(w http.ResponseWriter, r *http.Request) { password := userReq.Password mnemonic := userReq.Mnemonic if user == "" { - h.logger.Errorf("user signup: \"user\" argument missing") - jsonhttp.BadRequest(w, &response{Message: "user signup: \"user\" argument missing"}) + h.logger.Errorf("user signup: \"userName\" argument missing") + jsonhttp.BadRequest(w, &response{Message: "user signup: \"userName\" argument missing"}) return } if password == "" { diff --git a/pkg/api/user_signup_v1.go b/pkg/api/user_signup_v1.go index 5e508109..14d1e3d5 100644 --- a/pkg/api/user_signup_v1.go +++ b/pkg/api/user_signup_v1.go @@ -21,16 +21,14 @@ import ( "net/http" "github.com/fairdatasociety/fairOS-dfs/cmd/common" - "github.com/fairdatasociety/fairOS-dfs/pkg/cookie" - u "github.com/fairdatasociety/fairOS-dfs/pkg/user" "resenje.org/jsonhttp" ) -// UserSignupHandler is the api handler to create new user -// it takes two mandatory arguments and one optional argument -// - user_name: the name of the user to create -// - password: the password of the user -// * mnemonic: a 12 word mnemonic to use to create the hd wallet of the user +// UserSignupHandler godoc +// +// @Tags user +// @Deprecated +// @Router /v1/user/signup [post] func (h *Handler) UserSignupHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if contentType != jsonContentType { @@ -40,7 +38,7 @@ func (h *Handler) UserSignupHandler(w http.ResponseWriter, r *http.Request) { } decoder := json.NewDecoder(r.Body) - var userReq common.UserRequest + var userReq common.UserSignupRequest err := decoder.Decode(&userReq) if err != nil { h.logger.Errorf("user signup: could not decode arguments") @@ -50,7 +48,6 @@ func (h *Handler) UserSignupHandler(w http.ResponseWriter, r *http.Request) { user := userReq.UserName password := userReq.Password - mnemonic := userReq.Mnemonic if user == "" { h.logger.Errorf("user signup: \"user\" argument missing") jsonhttp.BadRequest(w, &response{Message: "user signup: \"user\" argument missing"}) @@ -61,37 +58,5 @@ func (h 
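A sketch of the v2 signup request shape referenced above, with placeholder values (mnemonic is optional, default port assumed):

```
curl -X POST http://localhost:9090/v2/user/signup \
  -H "Content-Type: application/json" \
  -d '{"userName": "alice", "password": "secret"}'
```

A successful signup returns 201 Created with the UserSignupResponse fields (address, nameHash, publicKey, message, mnemonic).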
*Handler) UserSignupHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.BadRequest(w, &response{Message: "user signup: \"password\" argument missing"}) return } - - // create user - address, createdMnemonic, ui, err := h.dfsAPI.CreateUser(user, password, mnemonic, "") - if err != nil { - if err == u.ErrUserAlreadyPresent { - h.logger.Errorf("user signup: %v", err) - jsonhttp.BadRequest(w, &response{Message: "user signup: " + err.Error()}) - return - } - h.logger.Errorf("user signup: %v", err) - jsonhttp.InternalServerError(w, &response{Message: "user signup: " + err.Error()}) - return - } - - err = cookie.SetSession(ui.GetSessionId(), w, h.cookieDomain) - if err != nil { - h.logger.Errorf("user signup: %v", err) - jsonhttp.InternalServerError(w, &response{Message: "user signup: " + err.Error()}) - return - } - - if mnemonic == "" { - mnemonic = createdMnemonic - } else { - mnemonic = "" - } - - // send the response - w.Header().Set("Content-Type", " application/json") - jsonhttp.Created(w, &UserSignupResponse{ - Address: address, - Mnemonic: mnemonic, - }) + jsonhttp.BadRequest(w, &response{Message: "user signup: deprecated"}) } diff --git a/pkg/api/user_stat.go b/pkg/api/user_stat.go index cd3d49c3..f8b3a482 100644 --- a/pkg/api/user_stat.go +++ b/pkg/api/user_stat.go @@ -23,8 +23,17 @@ import ( "resenje.org/jsonhttp" ) -// UserStatHandler is the api handler to get the information about a user -// it takes no arguments +// UserStatHandler godoc +// +// @Summary User stat +// @Description show user stats +// @Tags user +// @Accept json +// @Param Cookie header string true "cookie parameter" +// @Success 200 {object} user.Stat +// @Failure 400 {object} response +// @Failure 500 {object} response +// @Router /v1/user/stat [get] func (h *Handler) UserStatHandler(w http.ResponseWriter, r *http.Request) { // get values from cookie sessionId, err := cookie.GetSessionIdFromCookie(r) diff --git a/pkg/api/ws.go b/pkg/api/ws.go index 323723dc..18210086 100644 --- a/pkg/api/ws.go +++ b/pkg/api/ws.go @@ -5,22 +5,26 @@ import ( "encoding/json" "fmt" "io" - "mime/multipart" "net/http" "path/filepath" "strconv" "strings" "time" + "github.com/dustin/go-humanize" "github.com/fairdatasociety/fairOS-dfs/cmd/common" + "github.com/fairdatasociety/fairOS-dfs/pkg/collection" + "github.com/fairdatasociety/fairOS-dfs/pkg/dir" + "github.com/fairdatasociety/fairOS-dfs/pkg/file" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" "github.com/gorilla/websocket" "github.com/sirupsen/logrus" ) -const ( - wsChunkLimit = 1000000 -) +// const ( +// wsChunkLimit = 1000000 +// ) var ( readDeadline = 4 * time.Second @@ -74,8 +78,8 @@ func (h *Handler) handleEvents(conn *websocket.Conn) error { return } if err := conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { - h.logger.Debugf("ws event handler: upload: failed to send ping: %v", err) - h.logger.Error("ws event handler: upload: failed to send ping") + h.logger.Debugf("ws event handler: failed to send ping: %v", err) + h.logger.Error("ws event handler: failed to send ping") return } } @@ -91,150 +95,17 @@ func (h *Handler) handleEvents(conn *websocket.Conn) error { return nil }) - var cookie []string - - // create a http request for feeding the http handler - newRequest := func(method, url string, buf []byte) (*http.Request, error) { - httpReq, err := http.NewRequest(method, url, bytes.NewBuffer(buf)) - if err != nil { - return nil, err - } - httpReq.Header.Add("Content-Type", "application/json") - 
httpReq.Header.Add("Content-Length", strconv.Itoa(len(buf))) - if cookie != nil { - httpReq.Header.Set("Cookie", cookie[0]) - } - return httpReq, nil - } - - // create a file upload request for feeding the http handler - newMultipartRequestWithBinaryMessage := func(params interface{}, formField, method, url string, streaming bool) (*http.Request, error) { - jsonBytes, _ := json.Marshal(params) - args := make(map[string]string) - if err := json.Unmarshal(jsonBytes, &args); err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to read params: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to read params") - return nil, err - } - - if err != nil { - return nil, err - } - body := new(bytes.Buffer) - writer := multipart.NewWriter(body) - fileName := "" - compression := "" - contentLength := "0" - // Add parameters - for k, v := range args { - if k == "file_name" { - fileName = v - } else if k == "content_length" { - contentLength = v - } else if k == "compression" { - compression = strings.ToLower(compression) - } - err := writer.WriteField(k, v) - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to write fields in form: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to write fields in form") - return nil, err - } - } - - part, err := writer.CreateFormFile(formField, fileName) - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to create files field in form: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to create files field in form") - return nil, err - } - if streaming { - if contentLength == "" || contentLength == "0" { - h.logger.Warning("streaming needs \"content_length\"") - return nil, fmt.Errorf("streaming needs \"content_length\"") - } - var totalRead int64 = 0 - for { - mt, reader, err := conn.NextReader() - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to read next message: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to read next message") - return nil, err - } - if mt != websocket.BinaryMessage { - h.logger.Warning("non binary message", mt) - return nil, fmt.Errorf("received non binary message inside upload stream aborting") - } - n, err := io.Copy(part, reader) - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to read file: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to read file") - return nil, err - } - totalRead += n - if fmt.Sprintf("%d", totalRead) == contentLength { - h.logger.Debug("streamed full content") - break - } - } - } else { - mt, reader, err := conn.NextReader() - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to read next message: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to read next message") - return nil, err - } - if mt != websocket.BinaryMessage { - h.logger.Warning("non binary message", mt) - return nil, fmt.Errorf("file content should be as binary message") - } - _, err = io.Copy(part, reader) - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to read file: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to read file") - return nil, err - } - } - - err = writer.Close() - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to close writer: %v", err) - h.logger.Error("ws 
event handler: multipart rqst w/ body: failed to close writer") - return nil, err - } - - httpReq, err := http.NewRequest(method, url, body) - if err != nil { - h.logger.Debugf("ws event handler: multipart rqst w/ body: failed to create http request: %v", err) - h.logger.Error("ws event handler: multipart rqst w/ body: failed to create http request") - return nil, err - } - contentType := fmt.Sprintf("multipart/form-data;boundary=%v", writer.Boundary()) - httpReq.Header.Set("Content-Type", contentType) - if cookie != nil { - httpReq.Header.Set("Cookie", cookie[0]) - } - if compression != "" { - httpReq.Header.Set(CompressionHeader, compression) - } - return httpReq, nil - } - - // create a http request for file download - newMultipartRequest := func(method, url, boundary string, r io.Reader) (*http.Request, error) { - httpReq, err := http.NewRequest(method, url, r) - if err != nil { - return nil, err - } - contentType := fmt.Sprintf("multipart/form-data;boundary=%v", boundary) - httpReq.Header.Set("Content-Type", contentType) - if cookie != nil { - httpReq.Header.Set("Cookie", cookie[0]) + var sessionID string + logEventDescription := func(url string, startTime time.Time, status int, logger logging.Logger) { + fields := logrus.Fields{ + "uri": url, + "duration": time.Since(startTime).String(), + "status": status, } - return httpReq, nil + logger.WithFields(fields).Log(logrus.DebugLevel, "ws event response: ") } - respondWithError := func(response *common.WebsocketResponse, originalErr error) { + response.StatusCode = http.StatusInternalServerError if originalErr == nil { return } @@ -244,35 +115,24 @@ func (h *Handler) handleEvents(conn *websocket.Conn) error { message := map[string]interface{}{} message["message"] = originalErr.Error() - response.Params = &message - response.StatusCode = http.StatusInternalServerError - if err := conn.SetWriteDeadline(time.Now().Add(writeDeadline)); err != nil { + messageBytes, err := json.Marshal(message) + if err != nil { return } - if err := conn.WriteMessage(websocket.TextMessage, response.Marshal()); err != nil { - h.logger.Debugf("ws event handler: upload: failed to write error response: %v", err) - h.logger.Error("ws event handler: upload: failed to write error response") + _, err = response.WriteJson(messageBytes) + if err != nil { return } - } - - makeQueryParams := func(base string, params interface{}) string { - paramsMap := params.(map[string]interface{}) - url := base + "?" 
- for i, v := range paramsMap { - url = fmt.Sprintf("%s%s=%s&", url, i, v) + if err := conn.SetWriteDeadline(time.Now().Add(writeDeadline)); err != nil { + return } - return url - } - - logEventDescription := func(url string, startTime time.Time, status int, logger logging.Logger) { - fields := logrus.Fields{ - "uri": url, - "duration": time.Since(startTime).String(), - "status": status, + if err := conn.WriteMessage(websocket.TextMessage, response.Marshal()); err != nil { + h.logger.Debugf("ws event handler: failed to write error response: %v", err) + h.logger.Error("ws event handler: failed to write error response") + return } - logger.WithFields(fields).Log(logrus.DebugLevel, "ws event response: ") + logEventDescription(string(response.Event), time.Now(), response.StatusCode, h.logger) } for { @@ -297,652 +157,1899 @@ func (h *Handler) handleEvents(conn *websocket.Conn) error { } return conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, err.Error())) } + res.Id = req.Id res.Event = req.Event if err := conn.SetReadDeadline(time.Time{}); err != nil { continue } switch req.Event { // user related events - case common.UserSignup: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.UserSignup), jsonBytes) + case common.UserLoginV2: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.UserSignupHandler(res, httpReq) - cookie = res.Header()["Set-Cookie"] - logEventDescription(string(common.UserSignup), to, res.StatusCode, h.logger) - case common.UserLogin: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.UserLogin), jsonBytes) + loginRequest := &common.UserSignupRequest{} + err = json.Unmarshal(jsonBytes, loginRequest) if err != nil { respondWithError(res, err) continue } - h.UserLoginHandler(res, httpReq) - cookie = res.Header()["Set-Cookie"] - logEventDescription(string(common.UserLogin), to, res.StatusCode, h.logger) - case common.UserPresent: - url := makeQueryParams(string(common.UserPresent), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + ui, nameHash, publicKey, err := h.dfsAPI.LoginUserV2(loginRequest.UserName, loginRequest.Password, "") if err != nil { respondWithError(res, err) continue } - h.UserPresentHandler(res, httpReq) - logEventDescription(string(common.UserPresent), to, res.StatusCode, h.logger) - case common.UserIsLoggedin: - url := makeQueryParams(string(common.UserIsLoggedin), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + sessionID = ui.GetSessionId() + loginResponse := &UserSignupResponse{ + NameHash: nameHash, + PublicKey: publicKey, + } + resBytes, err := json.Marshal(loginResponse) if err != nil { respondWithError(res, err) continue } - h.IsUserLoggedInHandler(res, httpReq) - logEventDescription(string(common.UserIsLoggedin), to, res.StatusCode, h.logger) - case common.UserLogout: - httpReq, err := newRequest(http.MethodPost, string(common.UserLogout), nil) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) if err != nil { respondWithError(res, err) continue } - h.UserLogoutHandler(res, httpReq) - logEventDescription(string(common.UserLogout), to, res.StatusCode, h.logger) - case common.UserDelete: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.UserDelete), jsonBytes) + logEventDescription(string(common.UserLogin), to, http.StatusOK, h.logger) + case 
common.UserPresentV2: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.UserDeleteHandler(res, httpReq) - logEventDescription(string(common.UserDelete), to, res.StatusCode, h.logger) - case common.UserStat: - httpReq, err := newRequest(http.MethodGet, string(common.UserStat), nil) + request := &common.UserSignupRequest{} + err = json.Unmarshal(jsonBytes, request) if err != nil { respondWithError(res, err) continue } - h.UserStatHandler(res, httpReq) - logEventDescription(string(common.UserStat), to, res.StatusCode, h.logger) - // pod related events - case common.PodReceive: - url := makeQueryParams(string(common.PodReceive), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + presentResponse := &PresentResponse{ + Present: h.dfsAPI.IsUserNameAvailableV2(request.UserName), + } + resBytes, err := json.Marshal(presentResponse) if err != nil { respondWithError(res, err) continue } - h.PodReceiveHandler(res, httpReq) - logEventDescription(string(common.PodReceive), to, res.StatusCode, h.logger) - case common.PodReceiveInfo: - url := makeQueryParams(string(common.PodReceiveInfo), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) if err != nil { respondWithError(res, err) continue } - h.PodReceiveInfoHandler(res, httpReq) - logEventDescription(string(common.PodReceiveInfo), to, res.StatusCode, h.logger) - case common.PodNew: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.PodNew), jsonBytes) + logEventDescription(string(common.UserPresentV2), to, res.StatusCode, h.logger) + case common.UserIsLoggedin: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.PodCreateHandler(res, httpReq) - logEventDescription(string(common.PodNew), to, res.StatusCode, h.logger) - case common.PodOpen: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.PodOpen), jsonBytes) + request := &common.UserSignupRequest{} + err = json.Unmarshal(jsonBytes, request) if err != nil { respondWithError(res, err) continue } - h.PodOpenHandler(res, httpReq) - logEventDescription(string(common.PodOpen), to, res.StatusCode, h.logger) - case common.PodClose: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.PodClose), jsonBytes) + + loggedInResponse := &LoginStatus{ + LoggedIn: h.dfsAPI.IsUserLoggedIn(request.UserName), + } + resBytes, err := json.Marshal(loggedInResponse) if err != nil { respondWithError(res, err) continue } - h.PodCloseHandler(res, httpReq) - logEventDescription(string(common.PodClose), to, res.StatusCode, h.logger) - case common.PodSync: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.PodSync), jsonBytes) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) if err != nil { respondWithError(res, err) continue } - h.PodSyncHandler(res, httpReq) - logEventDescription(string(common.PodSync), to, res.StatusCode, h.logger) - case common.PodShare: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.PodShare), jsonBytes) + logEventDescription(string(common.UserIsLoggedin), to, res.StatusCode, h.logger) + case common.UserLogout: + err := h.dfsAPI.LogoutUser(sessionID) if err != nil { respondWithError(res, err) continue } - h.PodShareHandler(res, 
httpReq) - logEventDescription(string(common.PodShare), to, res.StatusCode, h.logger) - case common.PodDelete: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.PodDelete), jsonBytes) + message := map[string]interface{}{} + message["message"] = "user logged out successfully" + + messageBytes, err := json.Marshal(message) if err != nil { respondWithError(res, err) continue } - h.PodDeleteHandler(res, httpReq) - logEventDescription(string(common.PodDelete), to, res.StatusCode, h.logger) - case common.PodLs: - httpReq, err := newRequest(http.MethodGet, string(common.PodLs), nil) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) if err != nil { respondWithError(res, err) continue } - h.PodListHandler(res, httpReq) - logEventDescription(string(common.PodLs), to, res.StatusCode, h.logger) - case common.PodStat: - url := makeQueryParams(string(common.UserPresent), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + logEventDescription(string(common.UserLogout), to, res.StatusCode, h.logger) + case common.UserDelete: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.PodStatHandler(res, httpReq) - logEventDescription(string(common.PodStat), to, res.StatusCode, h.logger) - - // file related events - case common.DirMkdir: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.DirMkdir), jsonBytes) + request := &common.UserSignupRequest{} + err = json.Unmarshal(jsonBytes, request) if err != nil { respondWithError(res, err) continue } - h.DirectoryMkdirHandler(res, httpReq) - logEventDescription(string(common.DirMkdir), to, res.StatusCode, h.logger) - case common.DirRmdir: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.DirRmdir), jsonBytes) + err = h.dfsAPI.DeleteUserV2(request.Password, sessionID) if err != nil { respondWithError(res, err) continue } - h.DirectoryRmdirHandler(res, httpReq) - logEventDescription(string(common.DirRmdir), to, res.StatusCode, h.logger) - case common.DirLs: - url := makeQueryParams(string(common.DirLs), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + message := map[string]interface{}{} + message["message"] = "user deleted successfully" + + resBytes, err := json.Marshal(message) if err != nil { respondWithError(res, err) continue } - h.DirectoryLsHandler(res, httpReq) - logEventDescription(string(common.DirLs), to, res.StatusCode, h.logger) - case common.DirStat: - url := makeQueryParams(string(common.DirStat), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) if err != nil { respondWithError(res, err) continue } - h.DirectoryStatHandler(res, httpReq) - logEventDescription(string(common.DirStat), to, res.StatusCode, h.logger) - case common.DirIsPresent: - url := makeQueryParams(string(common.DirIsPresent), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + logEventDescription(string(common.UserDelete), to, res.StatusCode, h.logger) + case common.UserStat: + userStat, err := h.dfsAPI.GetUserStat(sessionID) if err != nil { respondWithError(res, err) continue } - h.DirectoryPresentHandler(res, httpReq) - logEventDescription(string(common.DirIsPresent), to, res.StatusCode, h.logger) - case common.FileDownloadStream: - jsonBytes, _ := json.Marshal(req.Params) - args := make(map[string]string) - if err := 
json.Unmarshal(jsonBytes, &args); err != nil { - h.logger.Debugf("ws event handler: download: failed to read params: %v", err) - h.logger.Error("ws event handler: download: failed to read params") + resBytes, err := json.Marshal(userStat) + if err != nil { respondWithError(res, err) continue } - body := new(bytes.Buffer) - writer := multipart.NewWriter(body) - for k, v := range args { - err := writer.WriteField(k, v) - if err != nil { - h.logger.Debugf("ws event handler: download: failed to write fields in form: %v", err) - h.logger.Error("ws event handler: download: failed to write fields in form") - respondWithError(res, err) - continue - } - } - err = writer.Close() + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) if err != nil { - h.logger.Debugf("ws event handler: download: failed to close writer: %v", err) - h.logger.Error("ws event handler: download: failed to close writer") respondWithError(res, err) continue } - httpReq, err := newMultipartRequest(http.MethodPost, string(common.FileDownload), writer.Boundary(), body) + logEventDescription(string(common.UserStat), to, res.StatusCode, h.logger) + // pod related events + case common.PodReceive: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.FileDownloadHandler(res, httpReq) - if res.StatusCode != 0 { - errMessage := res.Params.(map[string]interface{}) - respondWithError(res, fmt.Errorf("%s", errMessage["message"])) + request := &common.PodReceiveRequest{} + err = json.Unmarshal(jsonBytes, request) + if err != nil { + respondWithError(res, err) continue } - downloadConfirmResponse := common.NewWebsocketResponse() - downloadConfirmResponse.Event = common.FileDownloadStream - downloadConfirmResponse.Header().Set("Content-Type", "application/json; charset=utf-8") - if res.Header().Get("Content-Length") != "" { - dlMessage := map[string]string{} - dlMessage["content_length"] = res.Header().Get("Content-Length") - dlMessage["file_name"] = filepath.Base(args["file_path"]) - data, _ := json.Marshal(dlMessage) - _, err = downloadConfirmResponse.Write(data) - if err != nil { - h.logger.Debugf("ws event handler: download: failed to send download confirm: %v", err) - h.logger.Error("ws event handler: download: failed to send download confirm") - continue - } - } - downloadConfirmResponse.WriteHeader(http.StatusOK) - if err := conn.WriteMessage(messageType, downloadConfirmResponse.Marshal()); err != nil { - h.logger.Debugf("ws event handler: download: failed to write in connection: %v", err) - h.logger.Error("ws event handler: download: failed to write in connection") - continue - } - if res.StatusCode == 0 { - messageType = websocket.BinaryMessage - data := res.Marshal() - head := 0 - tail := len(data) - for head+wsChunkLimit < tail { - if err := conn.WriteMessage(messageType, data[head:(head+wsChunkLimit)]); err != nil { - h.logger.Debugf("ws event handler: response: failed to write in connection: %v", err) - h.logger.Error("ws event handler: response: failed to write in connection") - return err - } - head += wsChunkLimit - } - if err := conn.WriteMessage(messageType, data[head:tail]); err != nil { - h.logger.Debugf("ws event handler: response: failed to write in connection: %v", err) - h.logger.Error("ws event handler: response: failed to write in connection") - return err - } - } - messageType = websocket.TextMessage - res.Header().Set("Content-Type", "application/json; charset=utf-8") - if res.Header().Get("Content-Length") != "" { - dlFinishedMessage := 
map[string]string{} - dlFinishedMessage["message"] = "download finished" - data, _ := json.Marshal(dlFinishedMessage) - _, err = res.Write(data) - if err != nil { - h.logger.Debugf("ws event handler: download: failed to send download confirm: %v", err) - h.logger.Error("ws event handler: download: failed to send download confirm") - continue - } - res.WriteHeader(http.StatusOK) - } - logEventDescription(string(common.FileDownloadStream), to, res.StatusCode, h.logger) - case common.FileDownload: - jsonBytes, _ := json.Marshal(req.Params) - args := make(map[string]string) - if err := json.Unmarshal(jsonBytes, &args); err != nil { - h.logger.Debugf("ws event handler: download: failed to read params: %v", err) - h.logger.Error("ws event handler: download: failed to read params") + ref, err := utils.ParseHexReference(request.Reference) + if err != nil { respondWithError(res, err) continue } - body := new(bytes.Buffer) - writer := multipart.NewWriter(body) - for k, v := range args { - err := writer.WriteField(k, v) - if err != nil { - h.logger.Debugf("ws event handler: download: failed to write fields in form: %v", err) - h.logger.Error("ws event handler: download: failed to write fields in form") - respondWithError(res, err) - continue - } - } - err = writer.Close() + pi, err := h.dfsAPI.PodReceive(sessionID, request.PodName, ref) if err != nil { - h.logger.Debugf("ws event handler: download: failed to close writer: %v", err) - h.logger.Error("ws event handler: download: failed to close writer") respondWithError(res, err) continue } - httpReq, err := newMultipartRequest(http.MethodPost, string(common.FileDownload), writer.Boundary(), body) + message := map[string]interface{}{} + message["message"] = fmt.Sprintf("public pod %q, added as shared pod", pi.GetPodName()) + + resBytes, err := json.Marshal(message) if err != nil { respondWithError(res, err) continue } - h.FileDownloadHandler(res, httpReq) - if res.StatusCode != 0 { - errMessage := res.Params.(map[string]interface{}) - respondWithError(res, fmt.Errorf("%s", errMessage["message"])) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) + if err != nil { + respondWithError(res, err) continue } - downloadConfirmResponse := common.NewWebsocketResponse() - downloadConfirmResponse.Event = common.FileDownload - downloadConfirmResponse.Header().Set("Content-Type", "application/json; charset=utf-8") - if res.Header().Get("Content-Length") != "" { - dlMessage := map[string]string{} - dlMessage["content_length"] = res.Header().Get("Content-Length") - dlMessage["file_name"] = filepath.Base(args["file_path"]) - data, _ := json.Marshal(dlMessage) - _, err = downloadConfirmResponse.Write(data) - if err != nil { - h.logger.Debugf("ws event handler: download: failed to send download confirm: %v", err) - h.logger.Error("ws event handler: download: failed to send download confirm") - continue - } - } - downloadConfirmResponse.WriteHeader(http.StatusOK) - if err := conn.WriteMessage(messageType, downloadConfirmResponse.Marshal()); err != nil { - h.logger.Debugf("ws event handler: download: failed to write in connection: %v", err) - h.logger.Error("ws event handler: download: failed to write in connection") + logEventDescription(string(common.PodReceive), to, res.StatusCode, h.logger) + case common.PodReceiveInfo: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) continue } - messageType = websocket.BinaryMessage - if err := conn.WriteMessage(messageType, res.Marshal()); err != nil { - 
h.logger.Debugf("ws event handler: response: failed to write in connection: %v", err) - h.logger.Error("ws event handler: response: failed to write in connection") - return err - } - messageType = websocket.TextMessage - res.Header().Set("Content-Type", "application/json; charset=utf-8") - if res.Header().Get("Content-Length") != "" { - dlFinishedMessage := map[string]string{} - dlFinishedMessage["message"] = "download finished" - data, _ := json.Marshal(dlFinishedMessage) - _, err = res.Write(data) - if err != nil { - h.logger.Debugf("ws event handler: download: failed to send download confirm: %v", err) - h.logger.Error("ws event handler: download: failed to send download confirm") - continue - } - res.WriteHeader(http.StatusOK) - } - logEventDescription(string(common.FileDownload), to, res.StatusCode, h.logger) - case common.FileUpload, common.FileUploadStream: - streaming := false - if req.Event == common.FileUploadStream { - streaming = true + request := &common.PodReceiveRequest{} + err = json.Unmarshal(jsonBytes, request) + if err != nil { + respondWithError(res, err) + continue } - httpReq, err := newMultipartRequestWithBinaryMessage(req.Params, "files", http.MethodPost, string(req.Event), streaming) + ref, err := utils.ParseHexReference(request.Reference) if err != nil { respondWithError(res, err) continue } - h.FileUploadHandler(res, httpReq) - logEventDescription(string(common.FileUpload), to, res.StatusCode, h.logger) - case common.FileShare: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.FileShare), jsonBytes) + shareInfo, err := h.dfsAPI.PodReceiveInfo(sessionID, ref) if err != nil { respondWithError(res, err) continue } - h.FileShareHandler(res, httpReq) - logEventDescription(string(common.FileShare), to, res.StatusCode, h.logger) - case common.FileReceive: - url := makeQueryParams(string(common.FileReceive), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + + resBytes, err := json.Marshal(shareInfo) if err != nil { respondWithError(res, err) continue } - h.FileReceiveHandler(res, httpReq) - logEventDescription(string(common.FileReceive), to, res.StatusCode, h.logger) - case common.FileReceiveInfo: - url := makeQueryParams(string(common.FileReceiveInfo), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) if err != nil { respondWithError(res, err) continue } - h.FileReceiveInfoHandler(res, httpReq) - logEventDescription(string(common.FileReceiveInfo), to, res.StatusCode, h.logger) - case common.FileDelete: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.FileDelete), jsonBytes) + logEventDescription(string(common.PodReceiveInfo), to, res.StatusCode, h.logger) + case common.PodNew: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.FileDeleteHandler(res, httpReq) - logEventDescription(string(common.FileDelete), to, res.StatusCode, h.logger) - case common.FileStat: - url := makeQueryParams(string(common.FileStat), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) if err != nil { respondWithError(res, err) continue } - h.FileStatHandler(res, httpReq) - logEventDescription(string(common.FileStat), to, res.StatusCode, h.logger) - // kv related events - case common.KVCreate: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, 
err := newRequest(http.MethodPost, string(common.KVCreate), jsonBytes) + _, err = h.dfsAPI.CreatePod(podReq.PodName, sessionID) if err != nil { respondWithError(res, err) continue } - h.KVCreateHandler(res, httpReq) - logEventDescription(string(common.KVCreate), to, res.StatusCode, h.logger) - case common.KVList: - url := makeQueryParams(string(common.KVList), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + message := map[string]interface{}{} + message["message"] = "pod created successfully" + + messageBytes, err := json.Marshal(message) if err != nil { respondWithError(res, err) continue } - h.KVListHandler(res, httpReq) - logEventDescription(string(common.KVList), to, res.StatusCode, h.logger) - case common.KVOpen: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.KVOpen), jsonBytes) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) if err != nil { respondWithError(res, err) continue } - h.KVOpenHandler(res, httpReq) - logEventDescription(string(common.KVOpen), to, res.StatusCode, h.logger) - case common.KVCount: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.KVCount), jsonBytes) + logEventDescription(string(common.PodNew), to, res.StatusCode, h.logger) + case common.PodOpen: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.KVCountHandler(res, httpReq) - logEventDescription(string(common.KVCount), to, res.StatusCode, h.logger) - case common.KVDelete: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.KVDelete), jsonBytes) + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) if err != nil { respondWithError(res, err) continue } - h.KVDeleteHandler(res, httpReq) - logEventDescription(string(common.KVDelete), to, res.StatusCode, h.logger) - case common.KVEntryPut: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.KVEntryPut), jsonBytes) + + _, err = h.dfsAPI.OpenPod(podReq.PodName, sessionID) if err != nil { respondWithError(res, err) continue } - h.KVPutHandler(res, httpReq) - logEventDescription(string(common.KVEntryPut), to, res.StatusCode, h.logger) - case common.KVEntryGet: - url := makeQueryParams(string(common.KVEntryGet), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + message := map[string]interface{}{} + message["message"] = "pod opened successfully" + + messageBytes, err := json.Marshal(message) if err != nil { respondWithError(res, err) continue } - h.KVGetHandler(res, httpReq) - logEventDescription(string(common.KVEntryGet), to, res.StatusCode, h.logger) - case common.KVEntryDelete: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.KVEntryDelete), jsonBytes) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) if err != nil { respondWithError(res, err) continue } - h.KVDelHandler(res, httpReq) - logEventDescription(string(common.KVEntryDelete), to, res.StatusCode, h.logger) - case common.KVLoadCSV, common.KVLoadCSVStream: - streaming := false - if req.Event == common.KVLoadCSVStream { - streaming = true - } - httpReq, err := newMultipartRequestWithBinaryMessage(req.Params, "csv", http.MethodPost, string(req.Event), streaming) + logEventDescription(string(common.PodOpen), to, res.StatusCode, h.logger) + case common.PodClose: + jsonBytes, err := 
json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - - h.KVLoadCSVHandler(res, httpReq) - logEventDescription(string(common.KVLoadCSV), to, res.StatusCode, h.logger) - case common.KVSeek: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.KVSeek), jsonBytes) + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) if err != nil { respondWithError(res, err) continue } - h.KVSeekHandler(res, httpReq) - logEventDescription(string(common.KVSeek), to, res.StatusCode, h.logger) - case common.KVSeekNext: - url := makeQueryParams(string(common.KVSeekNext), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + + err = h.dfsAPI.ClosePod(podReq.PodName, sessionID) if err != nil { respondWithError(res, err) continue } - h.KVGetNextHandler(res, httpReq) - logEventDescription(string(common.KVSeekNext), to, res.StatusCode, h.logger) + message := map[string]interface{}{} + message["message"] = "pod closed successfully" - // doc related events - case common.DocCreate: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.DocCreate), jsonBytes) + messageBytes, err := json.Marshal(message) if err != nil { respondWithError(res, err) continue } - h.DocCreateHandler(res, httpReq) - logEventDescription(string(common.DocCreate), to, res.StatusCode, h.logger) - case common.DocList: - url := makeQueryParams(string(common.DocList), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) if err != nil { respondWithError(res, err) continue } - h.DocListHandler(res, httpReq) - logEventDescription(string(common.DocList), to, res.StatusCode, h.logger) - case common.DocOpen: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.DocOpen), jsonBytes) + logEventDescription(string(common.PodClose), to, res.StatusCode, h.logger) + case common.PodSync: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.DocOpenHandler(res, httpReq) - logEventDescription(string(common.DocOpen), to, res.StatusCode, h.logger) - case common.DocCount: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.DocCount), jsonBytes) + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) if err != nil { respondWithError(res, err) continue } - h.DocCountHandler(res, httpReq) - logEventDescription(string(common.DocCount), to, res.StatusCode, h.logger) - case common.DocDelete: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.DocDelete), jsonBytes) + + err = h.dfsAPI.SyncPod(podReq.PodName, sessionID) if err != nil { respondWithError(res, err) continue } - h.DocDeleteHandler(res, httpReq) - logEventDescription(string(common.DocDelete), to, res.StatusCode, h.logger) - case common.DocFind: - url := makeQueryParams(string(common.DocFind), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + message := map[string]interface{}{} + message["message"] = "pod synced successfully" + + messageBytes, err := json.Marshal(message) if err != nil { respondWithError(res, err) continue } - h.DocFindHandler(res, httpReq) - logEventDescription(string(common.DocFind), to, res.StatusCode, h.logger) - case common.DocEntryPut: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := 
newRequest(http.MethodPost, string(common.DocEntryPut), jsonBytes) + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) if err != nil { respondWithError(res, err) continue } - h.DocPutHandler(res, httpReq) - logEventDescription(string(common.DocEntryPut), to, res.StatusCode, h.logger) - case common.DocEntryGet: - url := makeQueryParams(string(common.DocEntryGet), req.Params) - httpReq, err := newRequest(http.MethodGet, url, nil) + logEventDescription(string(common.PodSync), to, res.StatusCode, h.logger) + case common.PodShare: + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.DocGetHandler(res, httpReq) - logEventDescription(string(common.DocEntryGet), to, res.StatusCode, h.logger) + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) + if err != nil { + respondWithError(res, err) + continue + } + sharedPodName := podReq.SharedPodName + if sharedPodName == "" { + sharedPodName = podReq.PodName + } + sharingRef, err := h.dfsAPI.PodShare(podReq.PodName, sharedPodName, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + response := &PodSharingReference{ + Reference: sharingRef, + } + resBytes, err := json.Marshal(response) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.PodShare), to, res.StatusCode, h.logger) + case common.PodDelete: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) + if err != nil { + respondWithError(res, err) + continue + } + + err = h.dfsAPI.DeletePod(podReq.PodName, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "pod deleted successfully" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.PodDelete), to, res.StatusCode, h.logger) + case common.PodLs: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) + if err != nil { + respondWithError(res, err) + continue + } + pods, sharedPods, err := h.dfsAPI.ListPods(sessionID) + if err != nil { + respondWithError(res, err) + continue + } + if pods == nil { + pods = make([]string, 0) + } + if sharedPods == nil { + sharedPods = make([]string, 0) + } + listResponse := &PodListResponse{ + Pods: pods, + SharedPods: sharedPods, + } + resBytes, err := json.Marshal(listResponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(resBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.PodLs), to, res.StatusCode, h.logger) + case common.PodStat: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + podReq := &common.PodRequest{} + err = json.Unmarshal(jsonBytes, podReq) + if err != nil { + respondWithError(res, err) + continue + } + + stat, err := h.dfsAPI.PodStat(podReq.PodName, sessionID) + if err != nil { + 
respondWithError(res, err) + continue + } + podStatRenponse := &PodStatResponse{ + PodName: stat.PodName, + PodAddress: stat.PodAddress, + } + + messageBytes, err := json.Marshal(podStatRenponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.PodStat), to, res.StatusCode, h.logger) + + // file related events + case common.DirMkdir: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.Mkdir(fsReq.PodName, fsReq.DirectoryPath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "directory created successfully" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DirMkdir), to, res.StatusCode, h.logger) + case common.DirRmdir: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.RmDir(fsReq.PodName, fsReq.DirectoryPath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "directory removed successfully" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DirRmdir), to, res.StatusCode, h.logger) + case common.DirLs: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + dEntries, fEntries, err := h.dfsAPI.ListDir(fsReq.PodName, fsReq.DirectoryPath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + if dEntries == nil { + dEntries = make([]dir.Entry, 0) + } + if fEntries == nil { + fEntries = make([]file.Entry, 0) + } + listResponse := &ListFileResponse{ + Directories: dEntries, + Files: fEntries, + } + messageBytes, err := json.Marshal(listResponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DirLs), to, res.StatusCode, h.logger) + case common.DirStat: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + ds, err := h.dfsAPI.DirectoryStat(fsReq.PodName, fsReq.DirectoryPath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + + messageBytes, err := json.Marshal(ds) + if err 
!= nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DirStat), to, res.StatusCode, h.logger) + case common.DirIsPresent: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + present, err := h.dfsAPI.IsDirPresent(fsReq.PodName, fsReq.DirectoryPath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + presentResponse := &DirPresentResponse{ + Present: present, + } + messageBytes, err := json.Marshal(presentResponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DirIsPresent), to, res.StatusCode, h.logger) + // case common.FileDownloadStream: + // jsonBytes, _ := json.Marshal(req.Params) + // args := make(map[string]string) + // if err := json.Unmarshal(jsonBytes, &args); err != nil { + // h.logger.Debugf("ws event handler: download: failed to read params: %v", err) + // h.logger.Error("ws event handler: download: failed to read params") + // respondWithError(res, err) + // continue + // } + // body := new(bytes.Buffer) + // writer := multipart.NewWriter(body) + // for k, v := range args { + // err := writer.WriteField(k, v) + // if err != nil { + // h.logger.Debugf("ws event handler: download: failed to write fields in form: %v", err) + // h.logger.Error("ws event handler: download: failed to write fields in form") + // respondWithError(res, err) + // continue + // } + // } + // err = writer.Close() + // if err != nil { + // h.logger.Debugf("ws event handler: download: failed to close writer: %v", err) + // h.logger.Error("ws event handler: download: failed to close writer") + // respondWithError(res, err) + // continue + // } + // httpReq, err := newMultipartRequest(http.MethodPost, string(common.FileDownload), writer.Boundary(), body) + // if err != nil { + // respondWithError(res, err) + // continue + // } + // h.FileDownloadHandlerPost(res, httpReq) + // if res.StatusCode != 0 { + // errMessage := res.Params.(map[string]interface{}) + // respondWithError(res, fmt.Errorf("%s", errMessage["message"])) + // continue + // } + // downloadConfirmResponse := common.NewWebsocketResponse() + // downloadConfirmResponse.Event = common.FileDownloadStream + // downloadConfirmResponse.Header().Set("Content-Type", "application/json; charset=utf-8") + // if res.Header().Get("Content-Length") != "" { + // dlMessage := map[string]string{} + // dlMessage["content_length"] = res.Header().Get("Content-Length") + // dlMessage["file_name"] = filepath.Base(args["file_path"]) + // data, _ := json.Marshal(dlMessage) + // _, err = downloadConfirmResponse.Write(data) + // if err != nil { + // h.logger.Debugf("ws event handler: download: failed to send download confirm: %v", err) + // h.logger.Error("ws event handler: download: failed to send download confirm") + // continue + // } + // } + // downloadConfirmResponse.WriteHeader(http.StatusOK) + // if err := conn.WriteMessage(messageType, downloadConfirmResponse.Marshal()); err != nil { + // h.logger.Debugf("ws event handler: download: failed to write in connection: %v", err) + // h.logger.Error("ws 
event handler: download: failed to write in connection") + // continue + // } + // if res.StatusCode == 0 { + // messageType = websocket.BinaryMessage + // data := res.Marshal() + // head := 0 + // tail := len(data) + // for head+wsChunkLimit < tail { + // if err := conn.WriteMessage(messageType, data[head:(head+wsChunkLimit)]); err != nil { + // h.logger.Debugf("ws event handler: response: failed to write in connection: %v", err) + // h.logger.Error("ws event handler: response: failed to write in connection") + // return err + // } + // head += wsChunkLimit + // } + // if err := conn.WriteMessage(messageType, data[head:tail]); err != nil { + // h.logger.Debugf("ws event handler: response: failed to write in connection: %v", err) + // h.logger.Error("ws event handler: response: failed to write in connection") + // return err + // } + // } + // messageType = websocket.TextMessage + // res.Header().Set("Content-Type", "application/json; charset=utf-8") + // if res.Header().Get("Content-Length") != "" { + // dlFinishedMessage := map[string]string{} + // dlFinishedMessage["message"] = "download finished" + // data, _ := json.Marshal(dlFinishedMessage) + // _, err = res.Write(data) + // if err != nil { + // h.logger.Debugf("ws event handler: download: failed to send download confirm: %v", err) + // h.logger.Error("ws event handler: download: failed to send download confirm") + // continue + // } + // res.WriteHeader(http.StatusOK) + // } + // logEventDescription(string(common.FileDownloadStream), to, res.StatusCode, h.logger) + case common.FileDownload: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileDownloadRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + data, n, err := h.dfsAPI.DownloadFile(fsReq.PodName, fsReq.Filepath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(data) + if err != nil { + respondWithError(res, err) + continue + } + data.Close() + downloadConfirmResponse := common.NewWebsocketResponse() + downloadConfirmResponse.Event = common.FileDownload + downloadConfirmResponse.Id = res.Id + dlMessage := map[string]string{} + dlMessage["content_length"] = fmt.Sprintf("%d", n) + dlMessage["file_name"] = filepath.Base(fsReq.Filepath) + dsRes, _ := json.Marshal(dlMessage) + _, err = downloadConfirmResponse.WriteJson(dsRes) + if err != nil { + respondWithError(res, err) + continue + } + + downloadConfirmResponse.StatusCode = http.StatusOK + if err := conn.WriteMessage(messageType, downloadConfirmResponse.Marshal()); err != nil { + respondWithError(res, err) + continue + } + + res.StatusCode = http.StatusOK + _, err = res.Write(buf.Bytes()) + if err != nil { + respondWithError(res, err) + continue + } + messageType = websocket.BinaryMessage + // if err := conn.WriteMessage(messageType, res.Marshal()); err != nil { + // respondWithError(res, err) + // return err + // } + // + // messageType = websocket.TextMessage + // dlFinishedMessage := map[string]string{} + // dlFinishedMessage["message"] = "download finished" + // finishedRes, _ := json.Marshal(dlFinishedMessage) + // res.StatusCode = http.StatusOK + // _, err = res.WriteJson(finishedRes) + // if err := conn.WriteMessage(messageType, res.Marshal()); err != nil { + // respondWithError(res, err) + // return err + // } + + logEventDescription(string(common.FileDownload), to, res.StatusCode, h.logger) + case 
common.FileUpload, common.FileUploadStream: + streaming := false + if req.Event == common.FileUploadStream { + streaming = true + } + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileRequest{} + if err := json.Unmarshal(jsonBytes, fsReq); err != nil { + respondWithError(res, err) + continue + } + + fileName := fsReq.FileName + compression := strings.ToLower(fsReq.Compression) + contentLength := fsReq.ContentLength + + data := &bytes.Buffer{} + if streaming { + if contentLength == "" || contentLength == "0" { + respondWithError(res, fmt.Errorf("streaming needs \"content_length\"")) + continue + } + var totalRead int64 = 0 + for { + mt, reader, err := conn.NextReader() + if err != nil { + respondWithError(res, err) + continue + } + if mt != websocket.BinaryMessage { + respondWithError(res, fmt.Errorf("file content should be as binary message")) + continue + } + n, err := io.Copy(data, reader) + if err != nil { + respondWithError(res, err) + continue + } + totalRead += n + if fmt.Sprintf("%d", totalRead) == contentLength { + h.logger.Debug("streamed full content") + break + } + } + } else { + mt, reader, err := conn.NextReader() + if err != nil { + respondWithError(res, err) + continue + } + if mt != websocket.BinaryMessage { + respondWithError(res, fmt.Errorf("file content should be as binary message")) + continue + } + _, err = io.Copy(data, reader) + if err != nil { + respondWithError(res, err) + continue + } + } + bs, err := humanize.ParseBytes(fsReq.BlockSize) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.UploadFile(fsReq.PodName, fileName, sessionID, int64(len(data.Bytes())), data, fsReq.DirPath, compression, uint32(bs), fsReq.Overwrite) + if err != nil { + respondWithError(res, err) + continue + } + responses := &UploadResponse{FileName: fileName, Message: "uploaded successfully"} + messageBytes, err := json.Marshal(responses) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.FileUpload), to, res.StatusCode, h.logger) + case common.FileShare: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + sharingRef, err := h.dfsAPI.ShareFile(fsReq.PodName, fsReq.DirectoryPath, fsReq.Destination, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + fsShareResponse := &FileSharingReference{ + Reference: sharingRef, + } + messageBytes, err := json.Marshal(fsShareResponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.FileShare), to, res.StatusCode, h.logger) + case common.FileReceive: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileReceiveRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + sharingRef, err := utils.ParseSharingReference(fsReq.SharingReference) + if err != nil { + respondWithError(res, err) + continue + } + filePath, err := 
h.dfsAPI.ReceiveFile(fsReq.PodName, fsReq.DirectoryPath, sharingRef, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + fsReceiveResponse := &ReceiveFileResponse{ + FileName: filePath, + } + messageBytes, err := json.Marshal(fsReceiveResponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.FileReceive), to, res.StatusCode, h.logger) + case common.FileReceiveInfo: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileReceiveRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + sharingRef, err := utils.ParseSharingReference(fsReq.SharingReference) + if err != nil { + respondWithError(res, err) + continue + } + receiveInfo, err := h.dfsAPI.ReceiveInfo(sessionID, sharingRef) + if err != nil { + respondWithError(res, err) + continue + } + messageBytes, err := json.Marshal(receiveInfo) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.FileReceiveInfo), to, res.StatusCode, h.logger) + case common.FileDelete: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.DeleteFile(fsReq.PodName, fsReq.FilePath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "file deleted successfully" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.FileDelete), to, res.StatusCode, h.logger) + case common.FileStat: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + fsReq := &common.FileSystemRequest{} + err = json.Unmarshal(jsonBytes, fsReq) + if err != nil { + respondWithError(res, err) + continue + } + stat, err := h.dfsAPI.FileStat(fsReq.PodName, fsReq.DirectoryPath, sessionID) + if err != nil { + respondWithError(res, err) + continue + } + messageBytes, err := json.Marshal(stat) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.FileStat), to, res.StatusCode, h.logger) + + // kv related events + case common.KVCreate: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + idxType := kvReq.IndexType + if idxType == "" { + idxType = "string" + } + + var indexType collection.IndexType + switch idxType { + case "string": + indexType = collection.StringIndex + case "number": + indexType = collection.NumberIndex + case "bytes": + default: + respondWithError(res, 
fmt.Errorf("kv create: invalid \"indexType\" ")) + continue + } + err = h.dfsAPI.KVCreate(sessionID, kvReq.PodName, kvReq.TableName, indexType) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "kv store created" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVCreate), to, res.StatusCode, h.logger) + case common.KVList: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + collections, err := h.dfsAPI.KVList(sessionID, kvReq.PodName) + if err != nil { + respondWithError(res, err) + continue + } + var col Collections + for k, v := range collections { + m := Collection{ + Name: k, + IndexedColumns: v, + CollectionType: "KV Store", + } + col.Tables = append(col.Tables, m) + } + + messageBytes, err := json.Marshal(col) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVList), to, res.StatusCode, h.logger) + case common.KVOpen: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + err = h.dfsAPI.KVOpen(sessionID, kvReq.PodName, kvReq.TableName) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "kv store created" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVOpen), to, res.StatusCode, h.logger) + case common.KVCount: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + count, err := h.dfsAPI.KVCount(sessionID, kvReq.PodName, kvReq.TableName) + if err != nil { + respondWithError(res, err) + continue + } + messageBytes, err := json.Marshal(count) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVCount), to, res.StatusCode, h.logger) + case common.KVDelete: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + err = h.dfsAPI.KVDelete(sessionID, kvReq.PodName, kvReq.TableName) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "kv store deleted" + + messageBytes, err := json.Marshal(message) + if err != nil { + 
respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVDelete), to, res.StatusCode, h.logger) + case common.KVEntryPresent: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + presentResponse := &PresentResponse{ + Present: true, + } + _, _, err = h.dfsAPI.KVGet(sessionID, kvReq.PodName, kvReq.TableName, kvReq.Key) + if err != nil { + presentResponse.Present = false + } + messageBytes, err := json.Marshal(presentResponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVEntryPresent), to, res.StatusCode, h.logger) + case common.KVEntryPut: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.KVPut(sessionID, kvReq.PodName, kvReq.TableName, kvReq.Key, []byte(kvReq.Value)) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "key added" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVEntryPut), to, res.StatusCode, h.logger) + case common.KVEntryGet: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + columns, data, err := h.dfsAPI.KVGet(sessionID, kvReq.PodName, kvReq.TableName, kvReq.Key) + if err != nil { + respondWithError(res, err) + continue + } + var resp KVResponse + if columns != nil { + resp.Keys = columns + } else { + resp.Keys = []string{kvReq.Key} + } + resp.Values = data + messageBytes, err := json.Marshal(resp) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVEntryGet), to, res.StatusCode, h.logger) + case common.KVEntryDelete: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + _, err = h.dfsAPI.KVDel(sessionID, kvReq.PodName, kvReq.TableName, kvReq.Key) + if err != nil { + respondWithError(res, err) + continue + } + + message := map[string]interface{}{} + message["message"] = "key deleted" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVEntryDelete), to, 
res.StatusCode, h.logger) + // case common.KVLoadCSV, common.KVLoadCSVStream: + // streaming := false + // if req.Event == common.KVLoadCSVStream { + // streaming = true + // } + // httpReq, err := newMultipartRequestWithBinaryMessage(req.Params, "csv", http.MethodPost, string(req.Event), streaming) + // if err != nil { + // respondWithError(res, err) + // continue + // } + // + // h.KVLoadCSVHandler(res, httpReq) + // logEventDescription(string(common.KVLoadCSV), to, res.StatusCode, h.logger) + case common.KVSeek: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + if kvReq.Limit == "" { + kvReq.Limit = DefaultSeekLimit + } + noOfRows, err := strconv.ParseInt(kvReq.Limit, 10, 64) + if err != nil { + respondWithError(res, err) + continue + } + _, err = h.dfsAPI.KVSeek(sessionID, kvReq.PodName, kvReq.TableName, + kvReq.StartPrefix, kvReq.EndPrefix, noOfRows) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "seeked closest to the start key" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.KVSeek), to, res.StatusCode, h.logger) + case common.KVSeekNext: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + + kvReq := &common.KVRequest{} + err = json.Unmarshal(jsonBytes, kvReq) + if err != nil { + respondWithError(res, err) + continue + } + + columns, key, data, err := h.dfsAPI.KVGetNext(sessionID, kvReq.PodName, kvReq.TableName) + if err != nil { + respondWithError(res, err) + continue + } + resp := &KVResponse{} + if columns != nil { + resp.Keys = columns + } else { + resp.Keys = []string{key} + } + resp.Values = data + + messageBytes, err := json.Marshal(resp) + if err != nil { + respondWithError(res, err) + continue + } + + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + + logEventDescription(string(common.KVSeekNext), to, res.StatusCode, h.logger) + + // doc related events + case common.DocCreate: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + indexes := make(map[string]collection.IndexType) + si := docReq.SimpleIndex + if si != "" { + idxs := strings.Split(si, ",") + for _, idx := range idxs { + nt := strings.Split(idx, "=") + if len(nt) != 2 { + respondWithError(res, fmt.Errorf("doc create: \"si\" invalid argument")) + continue + } + switch nt[1] { + case "string": + indexes[nt[0]] = collection.StringIndex + case "number": + indexes[nt[0]] = collection.NumberIndex + case "map": + indexes[nt[0]] = collection.MapIndex + case "list": + indexes[nt[0]] = collection.ListIndex + case "bytes": + default: + respondWithError(res, fmt.Errorf("doc create: invalid \"indexType\" ")) + continue + } + } + } + + err = h.dfsAPI.DocCreate(sessionID, docReq.PodName, docReq.TableName, + indexes, docReq.Mutable) + if err != nil { + respondWithError(res, err) + continue + } + message := 
map[string]interface{}{} + message["message"] = "document db created" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocCreate), to, res.StatusCode, h.logger) + case common.DocList: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + collections, err := h.dfsAPI.DocList(sessionID, docReq.PodName) + if err != nil { + respondWithError(res, err) + continue + } + var col DocumentDBs + for name, dbSchema := range collections { + var indexes []collection.SIndex + indexes = append(indexes, dbSchema.SimpleIndexes...) + indexes = append(indexes, dbSchema.MapIndexes...) + indexes = append(indexes, dbSchema.ListIndexes...) + m := documentDB{ + Name: name, + IndexedColumns: indexes, + CollectionType: "Document Store", + } + col.Tables = append(col.Tables, m) + } + messageBytes, err := json.Marshal(col) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocList), to, res.StatusCode, h.logger) + case common.DocOpen: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.DocOpen(sessionID, docReq.PodName, docReq.TableName) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "document store opened" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocOpen), to, res.StatusCode, h.logger) + case common.DocCount: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + count, err := h.dfsAPI.DocCount(sessionID, docReq.PodName, docReq.TableName, docReq.Expression) + if err != nil { + respondWithError(res, err) + continue + } + messageBytes, err := json.Marshal(count) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocCount), to, res.StatusCode, h.logger) + case common.DocDelete: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.DocDelete(sessionID, docReq.PodName, docReq.TableName) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "document store deleted" + + messageBytes, 
err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocDelete), to, res.StatusCode, h.logger) + case common.DocFind: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + var limitInt int + if docReq.Limit == "" { + limitInt = 10 + } else { + lmt, err := strconv.Atoi(docReq.Limit) + if err != nil { + respondWithError(res, fmt.Errorf("doc find: invalid value for argument \"limit\"")) + continue + } + limitInt = lmt + } + data, err := h.dfsAPI.DocFind(sessionID, docReq.PodName, docReq.TableName, docReq.Expression, limitInt) + if err != nil { + respondWithError(res, err) + continue + } + var docs DocFindResponse + docs.Docs = data + messageBytes, err := json.Marshal(docs) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocFind), to, res.StatusCode, h.logger) + case common.DocEntryPut: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.DocPut(sessionID, docReq.PodName, docReq.TableName, []byte(docReq.Document)) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "added document to db" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocEntryPut), to, res.StatusCode, h.logger) + case common.DocEntryGet: + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + data, err := h.dfsAPI.DocGet(sessionID, docReq.PodName, docReq.TableName, docReq.ID) + if err != nil { + respondWithError(res, err) + continue + } + var getResponse DocGetResponse + getResponse.Doc = data + + messageBytes, err := json.Marshal(getResponse) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocEntryGet), to, res.StatusCode, h.logger) case common.DocEntryDel: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodDelete, string(common.DocEntryDel), jsonBytes) + jsonBytes, err := json.Marshal(req.Params) if err != nil { respondWithError(res, err) continue } - h.DocDelHandler(res, httpReq) - logEventDescription(string(common.DocEntryDel), to, res.StatusCode, h.logger) - case common.DocLoadJson, common.DocLoadJsonStream: - streaming := false - if req.Event == common.DocLoadJsonStream { - streaming = true + docReq := &common.DocRequest{} + err = 
json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue } - httpReq, err := newMultipartRequestWithBinaryMessage(req.Params, "json", http.MethodPost, string(req.Event), streaming) + err = h.dfsAPI.DocDel(sessionID, docReq.PodName, docReq.TableName, docReq.ID) if err != nil { respondWithError(res, err) continue } + message := map[string]interface{}{} + message["message"] = "deleted document from db" - h.DocLoadJsonHandler(res, httpReq) - logEventDescription(string(common.DocLoadJson), to, res.StatusCode, h.logger) + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) + if err != nil { + respondWithError(res, err) + continue + } + logEventDescription(string(common.DocEntryDel), to, res.StatusCode, h.logger) + // case common.DocLoadJson, common.DocLoadJsonStream: + // streaming := false + // if req.Event == common.DocLoadJsonStream { + // streaming = true + // } + // httpReq, err := newMultipartRequestWithBinaryMessage(req.Params, "json", http.MethodPost, string(req.Event), streaming) + // if err != nil { + // respondWithError(res, err) + // continue + // } + // + // h.DocLoadJsonHandler(res, httpReq) + // logEventDescription(string(common.DocLoadJson), to, res.StatusCode, h.logger) case common.DocIndexJson: - jsonBytes, _ := json.Marshal(req.Params) - httpReq, err := newRequest(http.MethodPost, string(common.DocIndexJson), jsonBytes) + jsonBytes, err := json.Marshal(req.Params) + if err != nil { + respondWithError(res, err) + continue + } + docReq := &common.DocRequest{} + err = json.Unmarshal(jsonBytes, docReq) + if err != nil { + respondWithError(res, err) + continue + } + err = h.dfsAPI.DocIndexJson(sessionID, docReq.PodName, docReq.TableName, docReq.FileName) + if err != nil { + respondWithError(res, err) + continue + } + message := map[string]interface{}{} + message["message"] = "indexing started" + + messageBytes, err := json.Marshal(message) + if err != nil { + respondWithError(res, err) + continue + } + res.StatusCode = http.StatusOK + _, err = res.WriteJson(messageBytes) if err != nil { respondWithError(res, err) continue } - h.DocIndexJsonHandler(res, httpReq) logEventDescription(string(common.DocIndexJson), to, res.StatusCode, h.logger) + default: + respondWithError(res, fmt.Errorf("unknown event")) + continue } if err := conn.SetWriteDeadline(time.Now().Add(readDeadline)); err != nil { return err diff --git a/pkg/blockstore/bee/client.go b/pkg/blockstore/bee/client.go index c8524458..e844a21b 100644 --- a/pkg/blockstore/bee/client.go +++ b/pkg/blockstore/bee/client.go @@ -404,36 +404,39 @@ func (s *Client) DownloadBlob(address []byte) ([]byte, int, error) { // DeleteReference unpins a reference so that it will be garbage collected by the Swarm network. 
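The reworked websocket handler above now unmarshals each event's params and calls the dfs API directly instead of re-routing through the HTTP handlers. As a rough illustration of the resulting wire protocol, the sketch below dials the socket, emits a directory-create event and prints the JSON reply. The endpoint path, the envelope field names (`id`, `event`, `params`) and the event string are assumptions for illustration only; the authoritative definitions live in the `common` package and the server routing, which are outside this hunk.

```go
// Hypothetical client sketch; envelope shape, event name and endpoint are assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

// wsRequest mirrors the envelope the handler appears to expect: an event name
// plus a free-form params object (field names are assumed, not taken from the patch).
type wsRequest struct {
	ID     string                 `json:"id,omitempty"`
	Event  string                 `json:"event"`
	Params map[string]interface{} `json:"params"`
}

func main() {
	// Assumed endpoint; adjust to wherever the dfs server exposes its websocket route.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:9090/ws/v1/events", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Ask the server to create a directory inside an already-opened pod.
	req := wsRequest{
		Event: "/dir/mkdir", // assumed string value of common.DirMkdir
		Params: map[string]interface{}{
			"podName": "pod1",
			"dirPath": "/photos",
		},
	}
	if err := conn.WriteJSON(req); err != nil {
		log.Fatal(err)
	}

	// The handler answers with a JSON body such as {"message": "directory created successfully"}.
	_, reply, err := conn.ReadMessage()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("server replied: %s\n", reply)
}
```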
func (s *Client) DeleteReference(address []byte) error { - // TODO uncomment the code once unpinning is fixed - //to := time.Now() - //addrString := swarm.NewAddress(address).String() - // - //fullUrl := s.url + pinsUrl + addrString - //req, err := http.NewRequest(http.MethodDelete, fullUrl, http.NoBody) - //if err != nil { - // return err - //} - // - //response, err := s.client.Do(req) - //if err != nil { - // return err - //} - //defer response.Body.Close() - // - //req.Close = true - //if response.StatusCode != http.StatusOK { - // respData, err := io.ReadAll(response.Body) - // if err != nil { - // return err - // } - // return fmt.Errorf("failed to unpin reference : %s", respData) - //} - // - //fields := logrus.Fields{ - // "reference": addrString, - // "duration": time.Since(to).String(), - //} - //s.logger.WithFields(fields).Log(logrus.DebugLevel, "delete chunk: ") + // TODO uncomment after unpinning is fixed + _ = address + /* + to := time.Now() + addrString := swarm.NewAddress(address).String() + + fullUrl := s.url + pinsUrl + addrString + req, err := http.NewRequest(http.MethodDelete, fullUrl, http.NoBody) + if err != nil { + return err + } + + response, err := s.client.Do(req) + if err != nil { + return err + } + defer response.Body.Close() + + req.Close = true + if response.StatusCode != http.StatusOK { + respData, err := io.ReadAll(response.Body) + if err != nil { + return err + } + return fmt.Errorf("failed to unpin reference : %s", respData) + } + + fields := logrus.Fields{ + "reference": addrString, + "duration": time.Since(to).String(), + } + s.logger.WithFields(fields).Log(logrus.DebugLevel, "delete chunk: ") + */ return nil } diff --git a/pkg/blockstore/bee/mock/client.go b/pkg/blockstore/bee/mock/client.go index cfc3b59a..85d3d966 100644 --- a/pkg/blockstore/bee/mock/client.go +++ b/pkg/blockstore/bee/mock/client.go @@ -114,7 +114,7 @@ func (m *BeeClient) UploadBlob(data []byte, _, _ bool) (address []byte, err erro } // DownloadBlob from swarm -func (m *BeeClient) DownloadBlob(address []byte) (data []byte, respCode int, err error) { +func (m *BeeClient) DownloadBlob(address []byte) ([]byte, int, error) { m.storerMu.Lock() defer m.storerMu.Unlock() if data, ok := m.storer[swarm.NewAddress(address).String()]; ok { diff --git a/pkg/collection/batch.go b/pkg/collection/batch.go index a4af692e..590e8f14 100644 --- a/pkg/collection/batch.go +++ b/pkg/collection/batch.go @@ -168,7 +168,7 @@ func (b *Batch) Write(podFile string) (*Manifest, error) { } if b.memDb.dirtyFlag { - diskManifest, err := b.idx.loadManifest(b.memDb.Name) + diskManifest, err := b.idx.loadManifest(b.memDb.Name, b.idx.encryptionPassword) if err != nil && errors.Is(err, ErrNoManifestFound) { // skipcq: TCV-001 return nil, err } @@ -203,7 +203,7 @@ func (b *Batch) mergeAndWriteManifest(diskManifest, memManifest *Manifest) (*Man if diskManifest.dirtyFlag { // save th disk manifest - err := b.idx.updateManifest(diskManifest) + err := b.idx.updateManifest(diskManifest, b.idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return nil, err } @@ -242,9 +242,11 @@ func (b *Batch) emptyManifestStack() error { // skipcq: TCV-001 func (b *Batch) storeMemoryManifest(manifest *Manifest, depth int) error { - //var wg sync.WaitGroup - //errC := make(chan error) - //wgDone := make(chan bool) + /* + var wg sync.WaitGroup + errC := make(chan error) + wgDone := make(chan bool) + */ // store any branches in this manifest for _, entry := range manifest.Entries { @@ -255,36 +257,36 @@ func (b *Batch) 
storeMemoryManifest(manifest *Manifest, depth int) error { entry.Manifest = nil return nil } - //wg.Add(1) - //go func() { - // defer func() { - // wg.Done() - // }() + // wg.Add(1) + // go func() { + // defer func() { + // wg.Done() + // }() err := b.storeMemoryManifest(entry.Manifest, depth+1) if err != nil { return err } - //}() + // }() } } - //go func() { - // wg.Wait() - // close(wgDone) - //}() + // go func() { + // wg.Wait() + // close(wgDone) + // }() // - //select { - //case <-wgDone: - // break - //case err := <-errC: - // close(errC) - // return err - //} + // select { + // case <-wgDone: + // break + // case err := <-errC: + // close(errC) + // return err + // } // store this manifest - //go func() { - err := b.idx.storeManifest(manifest) + // go func() { + err := b.idx.storeManifest(manifest, b.idx.encryptionPassword) if err != nil { return err } @@ -294,6 +296,6 @@ func (b *Batch) storeMemoryManifest(manifest *Manifest, depth int) error { fmt.Println(count) } - //}() + // }() return nil } diff --git a/pkg/collection/batch_test.go b/pkg/collection/batch_test.go index 623a33c6..274f694f 100644 --- a/pkg/collection/batch_test.go +++ b/pkg/collection/batch_test.go @@ -21,6 +21,9 @@ import ( "io" "testing" + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/collection" @@ -33,16 +36,16 @@ func TestBatchIndex(t *testing.T) { logger := logging.New(io.Discard, 0) acc := account.New(logger) ai := acc.GetUserAccountInfo() - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) user := acc.GetAddress(account.UserAccountIndex) - + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) t.Run("batch-add-docs", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_batch_0", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_batch_0", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // batch load and delete batch, err := collection.NewBatch(index) @@ -79,7 +82,7 @@ func TestBatchIndex(t *testing.T) { t.Run("batch-add-docs", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_batch_1", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_batch_1", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // batch load and delete batch, err := collection.NewBatch(index) @@ -116,7 +119,7 @@ func TestBatchIndex(t *testing.T) { t.Run("batch-add-del-docs", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_batch_2", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_batch_2", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // batch load and delete batch, err := collection.NewBatch(index) @@ -145,7 +148,7 @@ func TestBatchIndex(t *testing.T) { if err != nil { t.Fatal(err) } - index2, err := collection.OpenIndex("pod1", "testdb_batch_2", "key", fd, ai, user, mockClient, logger) + index2, err := collection.OpenIndex("pod1", "testdb_batch_2", "key", podPassword, fd, ai, user, mockClient, logger) if err != nil 
{ t.Fatal(err) } diff --git a/pkg/collection/document.go b/pkg/collection/document.go index bc0d832d..a969ba05 100644 --- a/pkg/collection/document.go +++ b/pkg/collection/document.go @@ -102,7 +102,7 @@ func NewDocumentStore(podName string, fd *feed.API, ai *account.Info, user utils } // CreateDocumentDB creates a new document database and its related indexes. -func (d *Document) CreateDocumentDB(dbName string, indexes map[string]IndexType, mutable bool) error { +func (d *Document) CreateDocumentDB(dbName, encryptionPassword string, indexes map[string]IndexType, mutable bool) error { d.logger.Info("creating document db: ", dbName) if d.fd.IsReadOnlyFeed() { d.logger.Errorf("creating document db: %v", ErrReadOnlyIndex) @@ -116,7 +116,7 @@ func (d *Document) CreateDocumentDB(dbName string, indexes map[string]IndexType, } // load the existing db's and see if this name is already there - docTables, err := d.LoadDocumentDBSchemas() + docTables, err := d.LoadDocumentDBSchemas(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -127,7 +127,7 @@ func (d *Document) CreateDocumentDB(dbName string, indexes map[string]IndexType, // since this db is not present already, create the table d.logger.Info("creating simple index: ", DefaultIndexFieldName) - err = CreateIndex(d.podName, dbName, DefaultIndexFieldName, StringIndex, d.fd, d.user, d.client, mutable) + err = CreateIndex(d.podName, dbName, DefaultIndexFieldName, encryptionPassword, StringIndex, d.fd, d.user, d.client, mutable) if err != nil { // skipcq: TCV-001 return err } @@ -146,7 +146,7 @@ func (d *Document) CreateDocumentDB(dbName string, indexes map[string]IndexType, // Now add the other indexes to simpleIndexes array for fieldName, fieldType := range indexes { // create the simple index - err = CreateIndex(d.podName, dbName, fieldName, fieldType, d.fd, d.user, d.client, mutable) + err = CreateIndex(d.podName, dbName, fieldName, encryptionPassword, fieldType, d.fd, d.user, d.client, mutable) if err != nil { // skipcq: TCV-001 return err } @@ -175,7 +175,7 @@ func (d *Document) CreateDocumentDB(dbName string, indexes map[string]IndexType, ListIndexes: listIndexes, } - err = d.storeDocumentDBSchemas(docTables) + err = d.storeDocumentDBSchemas(encryptionPassword, docTables) if err != nil { // skipcq: TCV-001 d.logger.Errorf("creating document db: %v", err.Error()) return err @@ -185,7 +185,7 @@ func (d *Document) CreateDocumentDB(dbName string, indexes map[string]IndexType, } // OpenDocumentDB open a document database and its related indexes. 
-func (d *Document) OpenDocumentDB(dbName string) error { +func (d *Document) OpenDocumentDB(dbName, encryptionPassword string) error { d.logger.Info("opening document db: ", dbName) // check if the db is already present and opened if d.IsDBOpened(dbName) { // skipcq: TCV-001 @@ -194,7 +194,7 @@ func (d *Document) OpenDocumentDB(dbName string) error { } // load the existing db's and see if this name is present - docTables, err := d.LoadDocumentDBSchemas() + docTables, err := d.LoadDocumentDBSchemas(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("opening document db: %v", err.Error()) return err @@ -209,7 +209,7 @@ func (d *Document) OpenDocumentDB(dbName string) error { simpleIndexs := make(map[string]*Index) for _, si := range schema.SimpleIndexes { d.logger.Info("opening simple index: ", si.FieldName) - idx, err := OpenIndex(d.podName, dbName, si.FieldName, d.fd, d.ai, d.user, d.client, d.logger) + idx, err := OpenIndex(d.podName, dbName, si.FieldName, encryptionPassword, d.fd, d.ai, d.user, d.client, d.logger) if err != nil { // skipcq: TCV-001 d.logger.Errorf("opening simple index: %v", err.Error()) return err @@ -221,7 +221,7 @@ func (d *Document) OpenDocumentDB(dbName string) error { mapIndexs := make(map[string]*Index) for _, mi := range schema.MapIndexes { d.logger.Info("opening map index: ", mi.FieldName) - idx, err := OpenIndex(d.podName, dbName, mi.FieldName, d.fd, d.ai, d.user, d.client, d.logger) + idx, err := OpenIndex(d.podName, dbName, mi.FieldName, encryptionPassword, d.fd, d.ai, d.user, d.client, d.logger) if err != nil { // skipcq: TCV-001 d.logger.Errorf("opening map index: %v", err.Error()) return err @@ -233,7 +233,7 @@ func (d *Document) OpenDocumentDB(dbName string) error { listIndexes := make(map[string]*Index) for _, li := range schema.ListIndexes { d.logger.Info("opening list index: ", li.FieldName) - idx, err := OpenIndex(d.podName, dbName, li.FieldName, d.fd, d.ai, d.user, d.client, d.logger) + idx, err := OpenIndex(d.podName, dbName, li.FieldName, encryptionPassword, d.fd, d.ai, d.user, d.client, d.logger) if err != nil { // skipcq: TCV-001 d.logger.Errorf("opening list index: %v", err.Error()) return err @@ -257,7 +257,7 @@ func (d *Document) OpenDocumentDB(dbName string) error { } // DeleteDocumentDB a document DB, all its data and its related indxes. 
-func (d *Document) DeleteDocumentDB(dbName string) error { +func (d *Document) DeleteDocumentDB(dbName, encryptionPassword string) error { d.logger.Info("deleting document db: ", dbName) if d.fd.IsReadOnlyFeed() { // skipcq: TCV-001 d.logger.Errorf("deleting document db: %v", ErrReadOnlyIndex) @@ -265,7 +265,7 @@ func (d *Document) DeleteDocumentDB(dbName string) error { } // load the existing db's and see if this name is already there - docTables, err := d.LoadDocumentDBSchemas() + docTables, err := d.LoadDocumentDBSchemas(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting document db: %v", err.Error()) return err @@ -280,7 +280,7 @@ func (d *Document) DeleteDocumentDB(dbName string) error { // open and delete the indexes if !d.IsDBOpened(dbName) { - err = d.OpenDocumentDB(dbName) + err = d.OpenDocumentDB(dbName, encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting document db: %v", err.Error()) return err @@ -292,7 +292,7 @@ func (d *Document) DeleteDocumentDB(dbName string) error { //TODO: before deleting the indexes, unpin all the documents referenced in the ID index for _, si := range docDB.simpleIndexes { d.logger.Info("deleting simple index: ", si.name, si.indexType) - err = si.DeleteIndex() + err = si.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting simple index: %v", err.Error()) return err @@ -300,7 +300,7 @@ func (d *Document) DeleteDocumentDB(dbName string) error { } for _, mi := range docDB.mapIndexes { d.logger.Info("deleting map index: ", mi.name, mi.indexType) - err = mi.DeleteIndex() + err = mi.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting map index: %v", err.Error()) return err @@ -308,7 +308,7 @@ func (d *Document) DeleteDocumentDB(dbName string) error { } for _, li := range docDB.listIndexes { d.logger.Info("deleting list index: ", li.name, li.indexType) - err = li.DeleteIndex() + err = li.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting map index: %v", err.Error()) return err @@ -323,7 +323,7 @@ func (d *Document) DeleteDocumentDB(dbName string) error { } // store the rest of the document db - err = d.storeDocumentDBSchemas(docTables) + err = d.storeDocumentDBSchemas(encryptionPassword, docTables) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting document db: ", err.Error()) return err @@ -334,14 +334,14 @@ func (d *Document) DeleteDocumentDB(dbName string) error { } // DeleteAllDocumentDBs deletes all document DBs, all their data and related indxes. 
-func (d *Document) DeleteAllDocumentDBs() error { +func (d *Document) DeleteAllDocumentDBs(encryptionPassword string) error { if d.fd.IsReadOnlyFeed() { // skipcq: TCV-001 d.logger.Errorf("deleting document db: %v", ErrReadOnlyIndex) return ErrReadOnlyIndex } // load the existing db's and see if this name is already there - docTables, err := d.LoadDocumentDBSchemas() + docTables, err := d.LoadDocumentDBSchemas(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting document db: %v", err.Error()) return err @@ -350,7 +350,7 @@ func (d *Document) DeleteAllDocumentDBs() error { for dbName := range docTables { // open and delete the indexes if !d.IsDBOpened(dbName) { - err = d.OpenDocumentDB(dbName) + err = d.OpenDocumentDB(dbName, encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting document db: %v", err.Error()) return err @@ -362,7 +362,7 @@ func (d *Document) DeleteAllDocumentDBs() error { //TODO: before deleting the indexes, unpin all the documents referenced in the ID index for _, si := range docDB.simpleIndexes { d.logger.Info("deleting simple index: ", si.name, si.indexType) - err = si.DeleteIndex() + err = si.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting simple index: %v", err.Error()) return err @@ -370,7 +370,7 @@ func (d *Document) DeleteAllDocumentDBs() error { } for _, mi := range docDB.mapIndexes { d.logger.Info("deleting map index: ", mi.name, mi.indexType) - err = mi.DeleteIndex() + err = mi.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting map index: %v", err.Error()) return err @@ -378,7 +378,7 @@ func (d *Document) DeleteAllDocumentDBs() error { } for _, li := range docDB.listIndexes { d.logger.Info("deleting list index: ", li.name, li.indexType) - err = li.DeleteIndex() + err = li.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting map index: %v", err.Error()) return err @@ -391,7 +391,7 @@ func (d *Document) DeleteAllDocumentDBs() error { d.logger.Info("deleted document db: ", dbName) } docTables = map[string]DBSchema{} - err = d.storeDocumentDBSchemas(docTables) + err = d.storeDocumentDBSchemas(encryptionPassword, docTables) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting document db: ", err.Error()) return err @@ -415,7 +415,7 @@ func (d *Document) Count(dbName, expr string) (uint64, error) { d.logger.Errorf("counting document db: %v", ErrIndexNotPresent) return 0, ErrIndexNotPresent } - return idx.CountIndex() + return idx.CountIndex(idx.encryptionPassword) } // count documents based on expression @@ -640,7 +640,7 @@ func (d *Document) Put(dbName string, doc []byte) error { } case NumberIndex: val := v.(float64) - //valStr := strconv.FormatFloat(val, 'f', 6, 64) + // valStr := strconv.FormatFloat(val, 'f', 6, 64) err := index.PutNumber(val, ref, NumberIndex, true) if err != nil { // skipcq: TCV-001 d.logger.Errorf("inserting in to document db: ", err.Error()) @@ -659,7 +659,7 @@ func (d *Document) Put(dbName string, doc []byte) error { } // Get retrieves a specific document from a document database matching the dcument id. 
-func (d *Document) Get(dbName, id string) ([]byte, error) { +func (d *Document) Get(dbName, id, podPassword string) ([]byte, error) { d.logger.Info("getting from document db: ", dbName, id) db := d.getOpenedDb(dbName) if db == nil { // skipcq: TCV-001 @@ -695,7 +695,7 @@ func (d *Document) Get(dbName, id string) ([]byte, error) { return nil, err } - data, err := d.getLineFromFile(idIndex.podFile, seekOffset) + data, err := d.getLineFromFile(idIndex.podFile, podPassword, seekOffset) if err != nil { d.logger.Errorf("getting from document db: ", err.Error()) return nil, err @@ -787,7 +787,7 @@ func (d *Document) Del(dbName, id string) error { } case NumberIndex: val := v.(float64) - //valStr := strconv.FormatFloat(val, 'f', 6, 64) + // valStr := strconv.FormatFloat(val, 'f', 6, 64) _, err := index.DeleteNumber(val) if err != nil { // skipcq: TCV-001 d.logger.Errorf("deleting from document db: ", err.Error()) @@ -815,7 +815,7 @@ func (d *Document) Del(dbName, id string) error { } // Find selects a number of rows from a document database matching an expression. -func (d *Document) Find(dbName, expr string, limit int) ([][]byte, error) { +func (d *Document) Find(dbName, expr, podPassword string, limit int) ([][]byte, error) { d.logger.Info("finding from document db: ", dbName, expr, limit) db := d.getOpenedDb(dbName) if db == nil { // skipcq: TCV-001 @@ -955,7 +955,7 @@ func (d *Document) Find(dbName, expr string, limit int) ([][]byte, error) { d.logger.Errorf("getting from document db: ", err.Error()) return nil, err } - data, err := d.getLineFromFile(idx.podFile, seekOffset) + data, err := d.getLineFromFile(idx.podFile, podPassword, seekOffset) if err != nil { d.logger.Errorf("finding from document db: ", err.Error()) return nil, err @@ -968,10 +968,10 @@ func (d *Document) Find(dbName, expr string, limit int) ([][]byte, error) { } // LoadDocumentDBSchemas loads the schema of all documents belonging to a pod. -func (d *Document) LoadDocumentDBSchemas() (map[string]DBSchema, error) { +func (d *Document) LoadDocumentDBSchemas(encryptionPassword string) (map[string]DBSchema, error) { collections := make(map[string]DBSchema) topic := utils.HashString(documentFile) - _, data, err := d.fd.GetFeedData(topic, d.user) + _, data, err := d.fd.GetFeedData(topic, d.user, []byte(encryptionPassword)) if err != nil { if err.Error() != "feed does not exist or was not updated yet" { // skipcq: TCV-001 return collections, err @@ -1010,7 +1010,7 @@ func (d *Document) IsDBOpened(dbName string) bool { return false } -func (d *Document) storeDocumentDBSchemas(collections map[string]DBSchema) error { +func (d *Document) storeDocumentDBSchemas(encryptionPassword string, collections map[string]DBSchema) error { buf := bytes.NewBuffer(nil) collectionLen := len(collections) if collectionLen > 0 { @@ -1023,7 +1023,7 @@ func (d *Document) storeDocumentDBSchemas(collections map[string]DBSchema) error } } topic := utils.HashString(documentFile) - _, err := d.fd.UpdateFeed(topic, d.user, buf.Bytes()) + _, err := d.fd.UpdateFeed(topic, d.user, buf.Bytes(), []byte(encryptionPassword)) if err != nil { // skipcq: TCV-001 return err } @@ -1074,7 +1074,7 @@ func (*Document) resolveExpression(expr string) (string, string, string, error) } // CreateDocBatch creates a batch index instead of normal index. This is used when doing a bulk insert. 
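Taken together, the `document.go` changes above thread the pod's encryption password through every call that touches the document-store feeds. A minimal sketch of the updated call sequence, assuming an already constructed `*collection.Document` (such as the `docStore` built in the tests further below) and the random pod password generated for that pod; the table name, document payload and query expression are illustrative only:

```go
// Sketch of the new signatures only; docStore and podPassword are assumed to
// have been created elsewhere (see the updated document_test.go setup).
package docexample

import "github.com/fairdatasociety/fairOS-dfs/pkg/collection"

func docDBRoundTrip(docStore *collection.Document, podPassword string) error {
	// Every schema-touching call now takes the pod's encryption password.
	indexes := map[string]collection.IndexType{"first_name": collection.StringIndex}
	if err := docStore.CreateDocumentDB("dinosaurs", podPassword, indexes, true); err != nil {
		return err
	}
	if err := docStore.OpenDocumentDB("dinosaurs", podPassword); err != nil {
		return err
	}

	// Put is unchanged, but reads now need the password to decrypt the feed data.
	if err := docStore.Put("dinosaurs", []byte(`{"id":"1","first_name":"rex"}`)); err != nil {
		return err
	}
	if _, err := docStore.Get("dinosaurs", "1", podPassword); err != nil {
		return err
	}
	// Query expression shown is illustrative; syntax is whatever the store's resolver accepts.
	if _, err := docStore.Find("dinosaurs", "first_name=rex", podPassword, 10); err != nil {
		return err
	}

	// Deleting the DB (and all of its indexes) also requires the password.
	return docStore.DeleteDocumentDB("dinosaurs", podPassword)
}
```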
-func (d *Document) CreateDocBatch(dbName string) (*DocBatch, error) { +func (d *Document) CreateDocBatch(dbName, podPassword string) (*DocBatch, error) { d.logger.Info("creating batch for inserting in document db: ", dbName) if d.fd.IsReadOnlyFeed() { // skipcq: TCV-001 d.logger.Errorf("creating batch: ", ErrReadOnlyIndex) @@ -1082,7 +1082,7 @@ func (d *Document) CreateDocBatch(dbName string) (*DocBatch, error) { } // see if the document db is empty - data, err := d.Find(dbName, "", 1) + data, err := d.Find(dbName, "", podPassword, 1) if err != nil { if !errors.Is(err, ErrEntryNotFound) { // skipcq: TCV-001 d.logger.Errorf("creating simple batch index: ", err.Error()) @@ -1235,7 +1235,7 @@ func (d *Document) DocBatchPut(docBatch *DocBatch, doc []byte, index int64) erro } case NumberIndex: val := v1.(float64) - //valStr = strconv.FormatFloat(val, 'f', 6, 64) + // valStr = strconv.FormatFloat(val, 'f', 6, 64) _, err := batchIndex.DelNumber(val) if err != nil { d.logger.Errorf("inserting in batch: ", err.Error()) @@ -1380,9 +1380,9 @@ func (d *Document) DocBatchWrite(docBatch *DocBatch, podFile string) error { // DocFileIndex indexes a existing json file in the pod with the document DB. // skipcq: TCV-001 -func (d *Document) DocFileIndex(dbName, podFile string) error { +func (d *Document) DocFileIndex(dbName, podFile, podPassword string) error { d.logger.Info("Indexing file to db: ", podFile, dbName) - reader, err := d.file.OpenFileForIndex(podFile) + reader, err := d.file.OpenFileForIndex(podFile, podPassword) if err != nil { d.logger.Errorf("Indexing file: ", err.Error()) return err @@ -1393,7 +1393,7 @@ func (d *Document) DocFileIndex(dbName, podFile string) error { return err } - batch, err := d.CreateDocBatch(dbName) + batch, err := d.CreateDocBatch(dbName, podPassword) if err != nil { d.logger.Errorf("Indexing file: ", err.Error()) return err @@ -1434,8 +1434,8 @@ func (d *Document) DocFileIndex(dbName, podFile string) error { } // skipcq: TCV-001 -func (d *Document) getLineFromFile(podFile string, seekOffset uint64) ([]byte, error) { - reader, err := d.file.OpenFileForIndex(podFile) +func (d *Document) getLineFromFile(podFile, podPassword string, seekOffset uint64) ([]byte, error) { + reader, err := d.file.OpenFileForIndex(podFile, podPassword) if err != nil { d.logger.Errorf("getting line: ", err.Error()) return nil, err diff --git a/pkg/collection/document_test.go b/pkg/collection/document_test.go index 79f6c430..2cdfcdfb 100644 --- a/pkg/collection/document_test.go +++ b/pkg/collection/document_test.go @@ -17,12 +17,18 @@ limitations under the License. 
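The test changes that follow rebuild the store with the new constructor shape: `CreateUserAccount` no longer takes a password, the file API needs a task manager, and a random pod password is generated per pod. Condensed into one hypothetical helper (the mock bee client is injected rather than constructed, since its constructor is outside this patch), the setup looks roughly like this:

```go
// Condensed, hypothetical version of the updated test setup; import paths and the
// return type of NewDocumentStore are inferred from the surrounding hunks.
package collection_test

import (
	"context"
	"io"
	"testing"
	"time"

	"github.com/fairdatasociety/fairOS-dfs/pkg/account"
	"github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock"
	"github.com/fairdatasociety/fairOS-dfs/pkg/collection"
	"github.com/fairdatasociety/fairOS-dfs/pkg/feed"
	f "github.com/fairdatasociety/fairOS-dfs/pkg/file"
	"github.com/fairdatasociety/fairOS-dfs/pkg/logging"
	"github.com/fairdatasociety/fairOS-dfs/pkg/pod"
	"github.com/fairdatasociety/fairOS-dfs/pkg/utils"
	"github.com/plexsysio/taskmanager"
)

func newTestDocStore(t *testing.T, mockClient *mock.BeeClient) (*collection.Document, string) {
	t.Helper()
	logger := logging.New(io.Discard, 0)

	acc := account.New(logger)
	ai := acc.GetUserAccountInfo()
	if _, _, err := acc.CreateUserAccount(""); err != nil { // password argument was dropped
		t.Fatal(err)
	}
	fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger)
	user := acc.GetAddress(account.UserAccountIndex)

	// The file API is now backed by a task manager.
	tm := taskmanager.New(1, 10, time.Second*15, logger)
	t.Cleanup(func() { _ = tm.Stop(context.Background()) })
	file := f.NewFile("pod1", mockClient, fd, user, tm, logger)

	// Each pod gets a random password that is threaded through the collection APIs.
	podPassword, _ := utils.GetRandString(pod.PodPasswordLength)
	return collection.NewDocumentStore("pod1", fd, ai, user, file, mockClient, logger), podPassword
}
```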
package collection_test import ( + "context" "encoding/json" "errors" "io" "testing" "time" + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" + "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/collection" @@ -45,36 +51,40 @@ func TestDocumentStore(t *testing.T) { logger := logging.New(io.Discard, 0) acc := account.New(logger) ai := acc.GetUserAccountInfo() - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) user := acc.GetAddress(account.UserAccountIndex) - file := f.NewFile("pod1", mockClient, fd, user, logger) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + file := f.NewFile("pod1", mockClient, fd, user, tm, logger) docStore := collection.NewDocumentStore("pod1", fd, ai, user, file, mockClient, logger) - + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) t.Run("create_document_db_errors", func(t *testing.T) { nilFd := feed.New(&account.Info{}, mockClient, logger) nilDocStore := collection.NewDocumentStore("pod1", nilFd, ai, user, file, mockClient, logger) - err := nilDocStore.CreateDocumentDB("docdb_err", nil, true) + err := nilDocStore.CreateDocumentDB("docdb_err", podPassword, nil, true) if !errors.Is(err, collection.ErrReadOnlyIndex) { t.Fatal("should be readonly index") } // create a document DB - createDocumentDBs(t, []string{"docdb_err"}, docStore, nil) + createDocumentDBs(t, []string{"docdb_err"}, docStore, nil, podPassword) - err = docStore.CreateDocumentDB("docdb_err", nil, true) + err = docStore.CreateDocumentDB("docdb_err", podPassword, nil, true) if !errors.Is(err, collection.ErrDocumentDBAlreadyPresent) { t.Fatal("db should be present already") } - err = docStore.OpenDocumentDB("docdb_err") + err = docStore.OpenDocumentDB("docdb_err", podPassword) if err != nil { t.Fatal(err) } - err = docStore.CreateDocumentDB("docdb_err", nil, true) + err = docStore.CreateDocumentDB("docdb_err", podPassword, nil, true) if !errors.Is(err, collection.ErrDocumentDBAlreadyOpened) { t.Fatal("db should be opened already") } @@ -82,10 +92,10 @@ func TestDocumentStore(t *testing.T) { t.Run("create_document_db", func(t *testing.T) { // create a document DB - createDocumentDBs(t, []string{"docdb_0"}, docStore, nil) + createDocumentDBs(t, []string{"docdb_0"}, docStore, nil, podPassword) // load the schem and check the count of simple indexes - schema := loadSchemaAndCheckSimpleIndexCount(t, docStore, "docdb_0", 1) + schema := loadSchemaAndCheckSimpleIndexCount(t, docStore, "docdb_0", podPassword, 1) // check the default index checkIndex(t, schema.SimpleIndexes[0], collection.DefaultIndexFieldName, collection.StringIndex) @@ -93,27 +103,27 @@ func TestDocumentStore(t *testing.T) { t.Run("delete_document_db", func(t *testing.T) { // create multiple document DB - createDocumentDBs(t, []string{"docdb_1_1", "docdb_1_2", "docdb_1_3"}, docStore, nil) - checkIfDBsExists(t, []string{"docdb_1_1", "docdb_1_2", "docdb_1_3"}, docStore) + createDocumentDBs(t, []string{"docdb_1_1", "docdb_1_2", "docdb_1_3"}, docStore, nil, podPassword) + checkIfDBsExists(t, []string{"docdb_1_1", "docdb_1_2", "docdb_1_3"}, docStore, podPassword) // delete the db in the middle - err = docStore.DeleteDocumentDB("docdb_1_2") + 
err = docStore.DeleteDocumentDB("docdb_1_2", podPassword) if err != nil { t.Fatal(err) } // check if other two db exists - checkIfDBsExists(t, []string{"docdb_1_1", "docdb_1_3"}, docStore) - err = docStore.DeleteDocumentDB("docdb_1_1") + checkIfDBsExists(t, []string{"docdb_1_1", "docdb_1_3"}, docStore, podPassword) + err = docStore.DeleteDocumentDB("docdb_1_1", podPassword) if err != nil { t.Fatal(err) } - err = docStore.DeleteDocumentDB("docdb_1_3") + err = docStore.DeleteDocumentDB("docdb_1_3", podPassword) if err != nil { t.Fatal(err) } - checkIfDBNotExists(t, "docdb_1_1", docStore) - checkIfDBNotExists(t, "docdb_1_3", docStore) + checkIfDBNotExists(t, "docdb_1_1", podPassword, docStore) + checkIfDBNotExists(t, "docdb_1_3", podPassword, docStore) }) t.Run("create_document_db_with_multiple_indexes", func(t *testing.T) { @@ -123,24 +133,24 @@ func TestDocumentStore(t *testing.T) { si["field2"] = collection.NumberIndex si["field3"] = collection.MapIndex si["field4"] = collection.ListIndex - createDocumentDBs(t, []string{"docdb_2"}, docStore, si) + createDocumentDBs(t, []string{"docdb_2"}, docStore, si, podPassword) // load the schem and check the count of simple indexes - schema := loadSchemaAndCheckSimpleIndexCount(t, docStore, "docdb_2", 3) + schema := loadSchemaAndCheckSimpleIndexCount(t, docStore, "docdb_2", podPassword, 3) // first check the default index checkIndex(t, schema.SimpleIndexes[0], collection.DefaultIndexFieldName, collection.StringIndex) checkIndex(t, schema.SimpleIndexes[0], "id", collection.StringIndex) - //second check the field in index 1 + // second check the field in index 1 if schema.SimpleIndexes[1].FieldName == "field1" { checkIndex(t, schema.SimpleIndexes[1], "field1", collection.StringIndex) } else { checkIndex(t, schema.SimpleIndexes[1], "field2", collection.NumberIndex) } - //third check the field in index 2 + // third check the field in index 2 if schema.SimpleIndexes[2].FieldName == "field2" { checkIndex(t, schema.SimpleIndexes[2], "field2", collection.NumberIndex) } else { @@ -159,9 +169,9 @@ func TestDocumentStore(t *testing.T) { t.Run("create_and open_document_db", func(t *testing.T) { // create a document DB - createDocumentDBs(t, []string{"docdb_3"}, docStore, nil) + createDocumentDBs(t, []string{"docdb_3"}, docStore, nil, podPassword) - err := docStore.OpenDocumentDB("docdb_3") + err := docStore.OpenDocumentDB("docdb_3", podPassword) if err != nil { t.Fatal(err) } @@ -170,16 +180,16 @@ func TestDocumentStore(t *testing.T) { if !docStore.IsDBOpened("docdb_3") { t.Fatalf("db not opened") } - }) + t.Run("put_immutable_error", func(t *testing.T) { // create a document DB - err := docStore.CreateDocumentDB("doc_do_immutable", nil, false) + err := docStore.CreateDocumentDB("doc_do_immutable", podPassword, nil, false) if err != nil { t.Fatal(err) } - err = docStore.OpenDocumentDB("doc_do_immutable") + err = docStore.OpenDocumentDB("doc_do_immutable", podPassword) if err != nil { t.Fatal(err) } @@ -205,9 +215,9 @@ func TestDocumentStore(t *testing.T) { t.Run("put_and_get", func(t *testing.T) { // create a document DB - createDocumentDBs(t, []string{"docdb_4"}, docStore, nil) + createDocumentDBs(t, []string{"docdb_4"}, docStore, nil, podPassword) - err := docStore.OpenDocumentDB("docdb_4") + err := docStore.OpenDocumentDB("docdb_4", podPassword) if err != nil { t.Fatal(err) } @@ -263,7 +273,7 @@ func TestDocumentStore(t *testing.T) { } // get the data and test if the retreived data is okay - gotData, err := docStore.Get("docdb_4", "1") + gotData, err := 
docStore.Get("docdb_4", "1", podPassword) if err != nil { t.Fatal(err) } @@ -288,9 +298,9 @@ func TestDocumentStore(t *testing.T) { si["age"] = collection.NumberIndex si["tag_map"] = collection.MapIndex si["tag_list"] = collection.ListIndex - createDocumentDBs(t, []string{"docdb_5"}, docStore, si) + createDocumentDBs(t, []string{"docdb_5"}, docStore, si, podPassword) - err := docStore.OpenDocumentDB("docdb_5") + err := docStore.OpenDocumentDB("docdb_5", podPassword) if err != nil { t.Fatal(err) } @@ -299,7 +309,7 @@ func TestDocumentStore(t *testing.T) { createTestDocuments(t, docStore, "docdb_5") // get string index and check if the documents returned are okay - docs, err := docStore.Get("docdb_5", "2") + docs, err := docStore.Get("docdb_5", "2", podPassword) if err != nil { t.Fatal(err) } @@ -325,9 +335,9 @@ func TestDocumentStore(t *testing.T) { si["age"] = collection.NumberIndex si["tag_map"] = collection.MapIndex si["tag_list"] = collection.ListIndex - createDocumentDBs(t, []string{"docdb_6"}, docStore, si) + createDocumentDBs(t, []string{"docdb_6"}, docStore, si, podPassword) - err := docStore.OpenDocumentDB("docdb_6") + err := docStore.OpenDocumentDB("docdb_6", podPassword) if err != nil { t.Fatal(err) } @@ -353,9 +363,9 @@ func TestDocumentStore(t *testing.T) { si["age"] = collection.NumberIndex si["tag_map"] = collection.MapIndex si["tag_list"] = collection.ListIndex - createDocumentDBs(t, []string{"docdb_7"}, docStore, si) + createDocumentDBs(t, []string{"docdb_7"}, docStore, si, podPassword) - err := docStore.OpenDocumentDB("docdb_7") + err := docStore.OpenDocumentDB("docdb_7", podPassword) if err != nil { t.Fatal(err) } @@ -415,9 +425,9 @@ func TestDocumentStore(t *testing.T) { si["age"] = collection.NumberIndex si["tag_map"] = collection.MapIndex si["tag_list"] = collection.ListIndex - createDocumentDBs(t, []string{"docdb_8"}, docStore, si) + createDocumentDBs(t, []string{"docdb_8"}, docStore, si, podPassword) - err := docStore.OpenDocumentDB("docdb_8") + err := docStore.OpenDocumentDB("docdb_8", podPassword) if err != nil { t.Fatal(err) } @@ -426,7 +436,7 @@ func TestDocumentStore(t *testing.T) { createTestDocuments(t, docStore, "docdb_8") // String = - docs, err := docStore.Find("docdb_8", "first_name=John", -1) + docs, err := docStore.Find("docdb_8", "first_name=John", podPassword, -1) if err != nil { t.Fatal(err) } @@ -457,7 +467,7 @@ func TestDocumentStore(t *testing.T) { } // tag - docs, err = docStore.Find("docdb_8", "tag_map=tgf21:tgv21", -1) + docs, err = docStore.Find("docdb_8", "tag_map=tgf21:tgv21", podPassword, -1) if err != nil { t.Fatal(err) } @@ -477,7 +487,7 @@ func TestDocumentStore(t *testing.T) { } // Number = - docs, err = docStore.Find("docdb_8", "age=25", -1) + docs, err = docStore.Find("docdb_8", "age=25", podPassword, -1) if err != nil { t.Fatal(err) } @@ -517,7 +527,7 @@ func TestDocumentStore(t *testing.T) { } // Number = with limit - docs, err = docStore.Find("docdb_8", "age=25", 2) + docs, err = docStore.Find("docdb_8", "age=25", podPassword, 2) if err != nil { t.Fatal(err) } @@ -546,7 +556,7 @@ func TestDocumentStore(t *testing.T) { } // Number => - docs, err = docStore.Find("docdb_8", "age=>30", -1) + docs, err = docStore.Find("docdb_8", "age=>30", podPassword, -1) if err != nil { t.Fatal(err) } @@ -575,7 +585,7 @@ func TestDocumentStore(t *testing.T) { } // Number > - docs, err = docStore.Find("docdb_8", "age>30", -1) + docs, err = docStore.Find("docdb_8", "age>30", podPassword, -1) if err != nil { t.Fatal(err) } @@ -599,9 +609,9 @@ func 
TestDocumentStore(t *testing.T) { si := make(map[string]collection.IndexType) si["first_name"] = collection.StringIndex si["age"] = collection.NumberIndex - createDocumentDBs(t, []string{"docdb_9"}, docStore, si) + createDocumentDBs(t, []string{"docdb_9"}, docStore, si, podPassword) - err := docStore.OpenDocumentDB("docdb_9") + err := docStore.OpenDocumentDB("docdb_9", podPassword) if err != nil { t.Fatal(err) } @@ -613,7 +623,7 @@ func TestDocumentStore(t *testing.T) { var list1 []string list1 = append(list1, "lst11", "lst12") addDocument(t, docStore, "docdb_9", "1", "John", "Doe", 45, tag1, list1) - docs, err := docStore.Get("docdb_9", "1") + docs, err := docStore.Get("docdb_9", "1", podPassword) if err != nil { t.Fatal(err) } @@ -634,7 +644,7 @@ func TestDocumentStore(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = docStore.Get("docdb_9", "1") + _, err = docStore.Get("docdb_9", "1", podPassword) if !errors.Is(err, collection.ErrEntryNotFound) { t.Fatal(err) } @@ -645,9 +655,9 @@ func TestDocumentStore(t *testing.T) { si := make(map[string]collection.IndexType) si["first_name"] = collection.StringIndex si["age"] = collection.NumberIndex - createDocumentDBs(t, []string{"docdb_10"}, docStore, si) + createDocumentDBs(t, []string{"docdb_10"}, docStore, si, podPassword) - err := docStore.OpenDocumentDB("docdb_10") + err := docStore.OpenDocumentDB("docdb_10", podPassword) if err != nil { t.Fatal(err) } @@ -670,7 +680,7 @@ func TestDocumentStore(t *testing.T) { } // count the total docs using another index to make sure we dont have it any index - docs, err := docStore.Find("docdb_10", "age=>20", -1) + docs, err := docStore.Find("docdb_10", "age=>20", podPassword, -1) if err != nil { t.Fatal(err) } @@ -686,14 +696,14 @@ func TestDocumentStore(t *testing.T) { si["age"] = collection.NumberIndex si["tag_map"] = collection.MapIndex si["tag_list"] = collection.ListIndex - createDocumentDBs(t, []string{"docdb_11"}, docStore, si) + createDocumentDBs(t, []string{"docdb_11"}, docStore, si, podPassword) - err := docStore.OpenDocumentDB("docdb_11") + err := docStore.OpenDocumentDB("docdb_11", podPassword) if err != nil { t.Fatal(err) } - docBatch, err := docStore.CreateDocBatch("docdb_11") + docBatch, err := docStore.CreateDocBatch("docdb_11", podPassword) if err != nil { t.Fatal(err) } @@ -738,7 +748,7 @@ func TestDocumentStore(t *testing.T) { } // count the total docs using another index to make sure we dont have it any index - docs, err := docStore.Find("docdb_11", "age=>20", -1) + docs, err := docStore.Find("docdb_11", "age=>20", podPassword, -1) if err != nil { t.Fatal(err) } @@ -747,123 +757,123 @@ func TestDocumentStore(t *testing.T) { } // tag - docs, err = docStore.Find("docdb_11", "tag_map=tgf21:tgv21", -1) + docs, err = docStore.Find("docdb_11", "tag_map=tgf21:tgv21", podPassword, -1) if err != nil { t.Fatal(err) } if len(docs) != 1 { t.Fatalf("expected count %d, got %d", 1, len(docs)) } - err = docStore.DeleteDocumentDB("docdb_11") + err = docStore.DeleteDocumentDB("docdb_11", podPassword) if err != nil { t.Fatal(err) } }) - - //t.Run("batch-immutable", func(t *testing.T) { - // // create a document DB - // si := make(map[string]collection.IndexType) - // si["first_name"] = collection.StringIndex - // si["age"] = collection.NumberIndex - // si["tag_map"] = collection.MapIndex - // si["tag_list"] = collection.ListIndex - // //createDocumentDBs(t, []string{"docdb_12"}, docStore, si) - // err := docStore.CreateDocumentDB("docdb_12", si, false) - // if err != nil { - // t.Fatal(err) - // 
} - // - // err = docStore.OpenDocumentDB("docdb_12") - // if err != nil { - // t.Fatal(err) - // } - // - // docBatch, err := docStore.CreateDocBatch("docdb_12") - // if err != nil { - // t.Fatal(err) - // } - // - // tag1 := make(map[string]string) - // tag1["tgf11"] = "tgv11" - // tag1["tgf12"] = "tgv12" - // var list1 []string - // list1 = append(list1, "lst11") - // list1 = append(list1, "lst12") - // addBatchDocument(t, docStore, docBatch, "1", "John", "Doe", 45, tag1, list1) - // tag2 := make(map[string]string) - // tag2["tgf21"] = "tgv21" - // tag2["tgf22"] = "tgv22" - // var list2 []string - // list2 = append(list2, "lst21") - // list2 = append(list2, "lst22") - // addBatchDocument(t, docStore, docBatch, "2", "John", "boy", 25, tag2, list2) - // tag3 := make(map[string]string) - // tag3["tgf31"] = "tgv31" - // tag3["tgf32"] = "tgv32" - // var list3 []string - // list3 = append(list3, "lst31") - // list3 = append(list3, "lst32") - // addBatchDocument(t, docStore, docBatch, "3", "Alice", "wonderland", 20, tag3, list3) - // tag4 := make(map[string]string) - // tag4["tgf41"] = "tgv41" - // tag4["tgf42"] = "tgv42" - // var list4 []string - // list4 = append(list4, "lst41") - // list4 = append(list4, "lst42") - // addBatchDocument(t, docStore, docBatch, "4", "John", "Doe", 35, tag4, list4) // this tests the overwriting in batch - // - // err = docStore.DocBatchWrite(docBatch, "") - // if err != nil { - // t.Fatal(err) - // } - // - // // count the total docs using id field - // count1, err := docStore.Count("docdb_12", "") - // if err != nil { - // t.Fatal(err) - // } - // if count1 != 4 { - // t.Fatalf("expected count %d, got %d", 4, count1) - // } - // - // // count the total docs using another index to make sure we dont have it any index - // docs, err := docStore.Find("docdb_12", "age=>20", -1) - // if err != nil { - // t.Fatal(err) - // } - // if len(docs) != 4 { - // t.Fatalf("expected count %d, got %d", 4, len(docs)) - // } - // - // // tag - // docs, err = docStore.Find("docdb_12", "tag_map=tgf21:tgv21", -1) - // if err != nil { - // t.Fatal(err) - // } - // if len(docs) != 1 { - // t.Fatalf("expected count %d, got %d", 1, len(docs)) - // } - // err = docStore.DeleteDocumentDB("docdb_12") - // if err != nil { - // t.Fatal(err) - // } - //}) - + /* + t.Run("batch-immutable", func(t *testing.T) { + // create a document DB + si := make(map[string]collection.IndexType) + si["first_name"] = collection.StringIndex + si["age"] = collection.NumberIndex + si["tag_map"] = collection.MapIndex + si["tag_list"] = collection.ListIndex + // createDocumentDBs(t, []string{"docdb_12"}, docStore, si) + err := docStore.CreateDocumentDB("docdb_12", si, false) + if err != nil { + t.Fatal(err) + } + + err = docStore.OpenDocumentDB("docdb_12") + if err != nil { + t.Fatal(err) + } + + docBatch, err := docStore.CreateDocBatch("docdb_12") + if err != nil { + t.Fatal(err) + } + + tag1 := make(map[string]string) + tag1["tgf11"] = "tgv11" + tag1["tgf12"] = "tgv12" + var list1 []string + list1 = append(list1, "lst11") + list1 = append(list1, "lst12") + addBatchDocument(t, docStore, docBatch, "1", "John", "Doe", 45, tag1, list1) + tag2 := make(map[string]string) + tag2["tgf21"] = "tgv21" + tag2["tgf22"] = "tgv22" + var list2 []string + list2 = append(list2, "lst21") + list2 = append(list2, "lst22") + addBatchDocument(t, docStore, docBatch, "2", "John", "boy", 25, tag2, list2) + tag3 := make(map[string]string) + tag3["tgf31"] = "tgv31" + tag3["tgf32"] = "tgv32" + var list3 []string + list3 = append(list3, 
"lst31") + list3 = append(list3, "lst32") + addBatchDocument(t, docStore, docBatch, "3", "Alice", "wonderland", 20, tag3, list3) + tag4 := make(map[string]string) + tag4["tgf41"] = "tgv41" + tag4["tgf42"] = "tgv42" + var list4 []string + list4 = append(list4, "lst41") + list4 = append(list4, "lst42") + addBatchDocument(t, docStore, docBatch, "4", "John", "Doe", 35, tag4, list4) // this tests the overwriting in batch + + err = docStore.DocBatchWrite(docBatch, "") + if err != nil { + t.Fatal(err) + } + + // count the total docs using id field + count1, err := docStore.Count("docdb_12", "") + if err != nil { + t.Fatal(err) + } + if count1 != 4 { + t.Fatalf("expected count %d, got %d", 4, count1) + } + + // count the total docs using another index to make sure we dont have it any index + docs, err := docStore.Find("docdb_12", "age=>20", -1) + if err != nil { + t.Fatal(err) + } + if len(docs) != 4 { + t.Fatalf("expected count %d, got %d", 4, len(docs)) + } + + // tag + docs, err = docStore.Find("docdb_12", "tag_map=tgf21:tgv21", -1) + if err != nil { + t.Fatal(err) + } + if len(docs) != 1 { + t.Fatalf("expected count %d, got %d", 1, len(docs)) + } + err = docStore.DeleteDocumentDB("docdb_12") + if err != nil { + t.Fatal(err) + } + }) + */ } -func createDocumentDBs(t *testing.T, dbNames []string, docStore *collection.Document, si map[string]collection.IndexType) { +func createDocumentDBs(t *testing.T, dbNames []string, docStore *collection.Document, si map[string]collection.IndexType, podPassword string) { t.Helper() for _, dbName := range dbNames { - err := docStore.CreateDocumentDB(dbName, si, true) + err := docStore.CreateDocumentDB(dbName, podPassword, si, true) if err != nil { t.Fatal(err) } } } -func checkIfDBsExists(t *testing.T, dbNames []string, docStore *collection.Document) { +func checkIfDBsExists(t *testing.T, dbNames []string, docStore *collection.Document, podPassword string) { t.Helper() - tables, err := docStore.LoadDocumentDBSchemas() + tables, err := docStore.LoadDocumentDBSchemas(podPassword) if err != nil { t.Fatal(err) } @@ -874,9 +884,9 @@ func checkIfDBsExists(t *testing.T, dbNames []string, docStore *collection.Docum } } -func checkIfDBNotExists(t *testing.T, tableName string, docStore *collection.Document) { +func checkIfDBNotExists(t *testing.T, tableName, podPassword string, docStore *collection.Document) { t.Helper() - tables, err := docStore.LoadDocumentDBSchemas() + tables, err := docStore.LoadDocumentDBSchemas(podPassword) if err != nil { t.Fatal(err) } @@ -885,9 +895,9 @@ func checkIfDBNotExists(t *testing.T, tableName string, docStore *collection.Doc } } -func loadSchemaAndCheckSimpleIndexCount(t *testing.T, docStore *collection.Document, dbName string, count int) collection.DBSchema { +func loadSchemaAndCheckSimpleIndexCount(t *testing.T, docStore *collection.Document, dbName, podPassword string, count int) collection.DBSchema { t.Helper() - tables, err := docStore.LoadDocumentDBSchemas() + tables, err := docStore.LoadDocumentDBSchemas(podPassword) if err != nil { t.Fatal(err) } diff --git a/pkg/collection/index.go b/pkg/collection/index.go index a6e27a16..97b857b0 100644 --- a/pkg/collection/index.go +++ b/pkg/collection/index.go @@ -78,17 +78,18 @@ func toIndexTypeEnum(s string) IndexType { } type Index struct { - name string - mutable bool - indexType IndexType - podFile string - user utils.Address - accountInfo *account.Info - feed *feed.API - client blockstore.Client - count uint64 - memDB *Manifest - logger logging.Logger + name string + mutable bool + 
indexType IndexType + podFile string + encryptionPassword string + user utils.Address + accountInfo *account.Info + feed *feed.API + client blockstore.Client + count uint64 + memDB *Manifest + logger logging.Logger } var ( @@ -96,13 +97,13 @@ var ( ) // CreateIndex creates a common index file to be used in kv or document tables. -func CreateIndex(podName, collectionName, indexName string, indexType IndexType, fd *feed.API, user utils.Address, client blockstore.Client, mutable bool) error { +func CreateIndex(podName, collectionName, indexName, encryptionPassword string, indexType IndexType, fd *feed.API, user utils.Address, client blockstore.Client, mutable bool) error { if fd.IsReadOnlyFeed() { // skipcq: TCV-001 return ErrReadOnlyIndex } actualIndexName := podName + collectionName + indexName topic := utils.HashString(actualIndexName) - _, oldData, err := fd.GetFeedData(topic, user) + _, oldData, err := fd.GetFeedData(topic, user, []byte(encryptionPassword)) if err == nil && len(oldData) != 0 && string(oldData) != utils.DeletedFeedMagicWord { // if the feed is present and it has some data means there index is still valid return ErrIndexAlreadyPresent @@ -122,13 +123,13 @@ func CreateIndex(podName, collectionName, indexName string, indexType IndexType, } if string(oldData) == utils.DeletedFeedMagicWord { // skipcq: TCV-001 - _, err = fd.UpdateFeed(topic, user, ref) + _, err = fd.UpdateFeed(topic, user, ref, []byte(encryptionPassword)) if err != nil { return ErrManifestCreate } return nil } - _, err = fd.CreateFeed(topic, user, ref) + _, err = fd.CreateFeed(topic, user, ref, []byte(encryptionPassword)) if err != nil { // skipcq: TCV-001 return ErrManifestCreate } @@ -136,42 +137,43 @@ func CreateIndex(podName, collectionName, indexName string, indexType IndexType, } // OpenIndex open the index and loas any index in to the memory. -func OpenIndex(podName, collectionName, indexName string, fd *feed.API, ai *account.Info, user utils.Address, client blockstore.Client, logger logging.Logger) (*Index, error) { +func OpenIndex(podName, collectionName, indexName, podPassword string, fd *feed.API, ai *account.Info, user utils.Address, client blockstore.Client, logger logging.Logger) (*Index, error) { actualIndexName := podName + collectionName + indexName - manifest := getRootManifestOfIndex(actualIndexName, fd, user, client) // this will load the entire Manifest for immutable indexes + manifest := getRootManifestOfIndex(actualIndexName, podPassword, fd, user, client) // this will load the entire Manifest for immutable indexes if manifest == nil { return nil, ErrIndexNotPresent } idx := &Index{ - name: manifest.Name, - mutable: manifest.Mutable, - indexType: manifest.IdxType, - podFile: manifest.PodFile, - user: user, - accountInfo: ai, - feed: fd, - client: client, - count: 0, - memDB: manifest, - logger: logger, + name: manifest.Name, + encryptionPassword: podPassword, + mutable: manifest.Mutable, + indexType: manifest.IdxType, + podFile: manifest.PodFile, + user: user, + accountInfo: ai, + feed: fd, + client: client, + count: 0, + memDB: manifest, + logger: logger, } return idx, nil } // DeleteIndex delete the index from file and all its entries. 
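The index layer follows the same pattern: the password becomes part of creating and opening an index, and `OpenIndex` now also records it on the `Index` itself (the new `encryptionPassword` field), so subsequent puts, deletes and iterations reuse it internally. An illustrative sketch (not part of this patch), assuming the feed API, account info, address, blockstore client and logger are wired up as in the index tests of this change:

```
// indexRoundTrip is a hypothetical helper exercising the reworked entry points.
func indexRoundTrip(fd *feed.API, ai *account.Info, user utils.Address,
	client blockstore.Client, logger logging.Logger, podPassword string) error {
	// create the index, passing the pod password used for its feed
	if err := collection.CreateIndex("pod1", "testdb", "key", podPassword,
		collection.StringIndex, fd, user, client, true); err != nil {
		return err
	}
	// open it again with the same password
	idx, err := collection.OpenIndex("pod1", "testdb", "key", podPassword,
		fd, ai, user, client, logger)
	if err != nil {
		return err
	}
	// entries would be added via idx.Put before counting with idx.CountIndex(podPassword)
	return idx.DeleteIndex(podPassword)
}
```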
-func (idx *Index) DeleteIndex() error { +func (idx *Index) DeleteIndex(encryptionPassword string) error { if idx.isReadOnlyFeed() { // skipcq: TCV-001 return ErrReadOnlyIndex } - manifest := getRootManifestOfIndex(idx.name, idx.feed, idx.user, idx.client) + manifest := getRootManifestOfIndex(idx.name, encryptionPassword, idx.feed, idx.user, idx.client) if manifest == nil { return ErrIndexNotPresent } // erase the top Manifest topic := utils.HashString(idx.name) - _, err := idx.feed.UpdateFeed(topic, idx.user, []byte(utils.DeletedFeedMagicWord)) + _, err := idx.feed.UpdateFeed(topic, idx.user, []byte(utils.DeletedFeedMagicWord), []byte(encryptionPassword)) if err != nil { // skipcq: TCV-001 return ErrDeleteingIndex } @@ -179,9 +181,9 @@ func (idx *Index) DeleteIndex() error { } // CountIndex counts the entries in an index. -func (idx *Index) CountIndex() (uint64, error) { +func (idx *Index) CountIndex(encryptionPassword string) (uint64, error) { if idx.memDB == nil || idx.memDB.Entries == nil { - manifest, err := idx.loadManifest(idx.name) + manifest, err := idx.loadManifest(idx.name, encryptionPassword) if err != nil { return 0, err } @@ -198,7 +200,7 @@ func (idx *Index) CountIndex() (uint64, error) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - idx.loadIndexAndCount(ctx, cancel, workers, idx.memDB, errC) + idx.loadIndexAndCount(ctx, cancel, workers, idx.memDB, encryptionPassword, errC) select { case err := <-errC: // skipcq: TCV-001 if err != nil { @@ -210,14 +212,15 @@ func (idx *Index) CountIndex() (uint64, error) { return idx.count, nil } -func (idx *Index) loadIndexAndCount(ctx context.Context, cancel context.CancelFunc, workers chan bool, manifest *Manifest, errC chan error) { +func (idx *Index) loadIndexAndCount(ctx context.Context, cancel context.CancelFunc, workers chan bool, manifest *Manifest, + encryptionPassword string, errC chan error) { var count uint64 for _, entry := range manifest.Entries { if entry.EType == IntermediateEntry { var newManifest *Manifest if entry.Manifest == nil { - man, err := idx.loadManifest(manifest.Name + entry.Name) + man, err := idx.loadManifest(manifest.Name+entry.Name, encryptionPassword) if err != nil { // skipcq: TCV-001 idx.logger.Error("Manifest load error: ", manifest.Name+entry.Name) return @@ -227,7 +230,7 @@ func (idx *Index) loadIndexAndCount(ctx context.Context, cancel context.CancelFu } else { // skipcq: TCV-001 newManifest = entry.Manifest } - idx.loadIndexAndCount(ctx, cancel, workers, newManifest, errC) + idx.loadIndexAndCount(ctx, cancel, workers, newManifest, encryptionPassword, errC) } else { count++ } @@ -236,11 +239,11 @@ func (idx *Index) loadIndexAndCount(ctx context.Context, cancel context.CancelFu } // Manifest related functions -func (idx *Index) loadManifest(manifestPath string) (*Manifest, error) { +func (idx *Index) loadManifest(manifestPath, encryptionPassword string) (*Manifest, error) { // get feed data and unmarshall the Manifest idx.logger.Info("loading Manifest: ", manifestPath) topic := utils.HashString(manifestPath) - _, refData, err := idx.feed.GetFeedData(topic, idx.user) + _, refData, err := idx.feed.GetFeedData(topic, idx.user, []byte(encryptionPassword)) if err != nil { // skipcq: TCV-001 return nil, ErrNoManifestFound } @@ -261,7 +264,7 @@ func (idx *Index) loadManifest(manifestPath string) (*Manifest, error) { return &manifest, nil } -func (idx *Index) updateManifest(manifest *Manifest) error { +func (idx *Index) updateManifest(manifest *Manifest, encryptionPassword string) 
error { // marshall and update the Manifest in the feed idx.logger.Info("updating Manifest: ", manifest.Name) data, err := json.Marshal(manifest) @@ -275,14 +278,14 @@ func (idx *Index) updateManifest(manifest *Manifest) error { } topic := utils.HashString(manifest.Name) - _, err = idx.feed.UpdateFeed(topic, idx.user, ref) + _, err = idx.feed.UpdateFeed(topic, idx.user, ref, []byte(encryptionPassword)) if err != nil { // skipcq: TCV-001 return ErrManifestCreate } return nil } -func (idx *Index) storeManifest(manifest *Manifest) error { +func (idx *Index) storeManifest(manifest *Manifest, encryptionPassword string) error { // marshall and store the Manifest as new feed data, err := json.Marshal(manifest) if err != nil { // skipcq: TCV-001 @@ -300,7 +303,7 @@ func (idx *Index) storeManifest(manifest *Manifest) error { } topic := utils.HashString(manifest.Name) - _, err = idx.feed.CreateFeed(topic, idx.user, ref) + _, err = idx.feed.CreateFeed(topic, idx.user, ref, []byte(encryptionPassword)) if err != nil { // skipcq: TCV-001 return ErrManifestCreate } @@ -333,10 +336,10 @@ func longestCommonPrefix(str1, str2 string) (string, string, string) { return str1[:matchLen], str1[matchLen:], str2[matchLen:] } -func getRootManifestOfIndex(actualIndexName string, fd *feed.API, user utils.Address, client blockstore.Client) *Manifest { +func getRootManifestOfIndex(actualIndexName, encryptionPassword string, fd *feed.API, user utils.Address, client blockstore.Client) *Manifest { var manifest Manifest topic := utils.HashString(actualIndexName) - _, addr, err := fd.GetFeedData(topic, user) + _, addr, err := fd.GetFeedData(topic, user, []byte(encryptionPassword)) if err != nil { return nil } diff --git a/pkg/collection/index_api.go b/pkg/collection/index_api.go index f6c95844..ea147273 100644 --- a/pkg/collection/index_api.go +++ b/pkg/collection/index_api.go @@ -48,7 +48,7 @@ func (idx *Index) Put(key string, refValue []byte, idxType IndexType, apnd bool) } // get the first feed of the Index - manifest, err := idx.loadManifest(idx.name) + manifest, err := idx.loadManifest(idx.name, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -101,7 +101,7 @@ func (idx *Index) Delete(key string) ([][]byte, error) { // then we have to remove the intermediate node in the parent Manifest // so that the entire branch goes kaboom parentEntryKey := filepath.Base(manifest.Name) - parentManifest, err := idx.loadManifest(filepath.Dir(manifest.Name)) + parentManifest, err := idx.loadManifest(filepath.Dir(manifest.Name), idx.encryptionPassword) if err != nil { return nil, err } @@ -112,7 +112,7 @@ func (idx *Index) Delete(key string) ([][]byte, error) { break } } - err = idx.updateManifest(parentManifest) + err = idx.updateManifest(parentManifest, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return nil, err } @@ -120,7 +120,7 @@ func (idx *Index) Delete(key string) ([][]byte, error) { } manifest.Entries = append(manifest.Entries[:i], manifest.Entries[i+1:]...) 
- err = idx.updateManifest(manifest) + err = idx.updateManifest(manifest, idx.encryptionPassword) if err != nil { return nil, err } @@ -178,7 +178,7 @@ func (idx *Index) addOrUpdateStringEntry(ctx context.Context, manifest *Manifest // store the new Manifest with two leaves if !memory { - err := idx.storeManifest(&newManifest) + err := idx.storeManifest(&newManifest, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -218,7 +218,7 @@ func (idx *Index) addOrUpdateStringEntry(ctx context.Context, manifest *Manifest } idx.addEntryToManifestSortedLexicographically(&newManifest, entry2) if !memory { - err := idx.storeManifest(&newManifest) + err := idx.storeManifest(&newManifest, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -238,7 +238,7 @@ func (idx *Index) addOrUpdateStringEntry(ctx context.Context, manifest *Manifest } else if len(keySuffix) > 0 { // load the entry's Manifest and add the keySuffix as a new leaf if !memory { - intermediateManifest, err := idx.loadManifest(manifest.Name + entry.Name) + intermediateManifest, err := idx.loadManifest(manifest.Name+entry.Name, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -250,7 +250,7 @@ func (idx *Index) addOrUpdateStringEntry(ctx context.Context, manifest *Manifest } else if entrySuffix == "" && keySuffix == "" { // load the entry's Manifest and add the keySuffix as a new leaf if !memory { - intermediateManifest, err := idx.loadManifest(manifest.Name + prefix) + intermediateManifest, err := idx.loadManifest(manifest.Name+prefix, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -281,7 +281,7 @@ func (idx *Index) addOrUpdateStringEntry(ctx context.Context, manifest *Manifest } idx.addEntryToManifestSortedLexicographically(&newManifest, entry2) if !memory { - err := idx.storeManifest(&newManifest) + err := idx.storeManifest(&newManifest, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -315,7 +315,7 @@ func (idx *Index) addOrUpdateStringEntry(ctx context.Context, manifest *Manifest } if entryAdded && !memory { - return idx.updateManifest(manifest) + return idx.updateManifest(manifest, idx.encryptionPassword) } return nil // skipcq: TCV-001 } @@ -379,13 +379,13 @@ func (*Index) addEntryToManifestSortedLexicographically(manifest *Manifest, entr func (idx *Index) seekManifestAndEntry(key string) (*Manifest, *Manifest, int, error) { // load the first Manifest of the index - fm, err := idx.loadManifest(idx.name) + fm, err := idx.loadManifest(idx.name, idx.encryptionPassword) if err != nil && !errors.Is(err, ErrNoManifestFound) { // skipcq: TCV-001 return nil, nil, 0, err } // if there are any elements in the index, then search for the entry - if len(fm.Entries) > 0 { + if fm.Entries != nil && len(fm.Entries) > 0 { return idx.findManifest(nil, fm, key) } return nil, nil, 0, ErrEntryNotFound @@ -415,7 +415,7 @@ func (idx *Index) findManifest(grandParentManifest, parentManifest *Manifest, ke if entry.Manifest == nil { childManifestPath := parentManifest.Name + entry.Name var err error - childManifest, err = idx.loadManifest(childManifestPath) + childManifest, err = idx.loadManifest(childManifestPath, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return nil, nil, 0, err } diff --git a/pkg/collection/index_api_test.go b/pkg/collection/index_api_test.go index 6b86f15f..996f4728 100644 --- a/pkg/collection/index_api_test.go +++ b/pkg/collection/index_api_test.go @@ -23,6 +23,9 @@ import ( "net/http" "testing" + 
"github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/collection" @@ -35,16 +38,16 @@ func TestIndexAPI(t *testing.T) { logger := logging.New(io.Discard, 0) acc := account.New(logger) ai := acc.GetUserAccountInfo() - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) user := acc.GetAddress(account.UserAccountIndex) - + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) t.Run("get-doc", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_api_0", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_api_0", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) kvMap := addLotOfDocs(t, index, mockClient) // get the expectedValue of keys and check against its actual expectedValue @@ -63,11 +66,11 @@ func TestIndexAPI(t *testing.T) { t.Run("get-count", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_api_1", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_api_1", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) kvMap := addLotOfDocs(t, index, mockClient) // find the count - count, err := index.CountIndex() + count, err := index.CountIndex(podPassword) if err != nil { t.Fatal(err) } @@ -79,7 +82,7 @@ func TestIndexAPI(t *testing.T) { t.Run("get-doc-del-doc-get-doc", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_api_2", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_api_2", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) kvMap := addLotOfDocs(t, index, mockClient) // get the value of the key just to check @@ -104,7 +107,7 @@ func TestIndexAPI(t *testing.T) { t.Run("get-multiple_docs", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_api_3", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_api_3", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // add multiple values for the same key addDoc(t, "key1", []byte("value1"), index, mockClient, true) diff --git a/pkg/collection/index_test.go b/pkg/collection/index_test.go index 2abc84f8..57af5976 100644 --- a/pkg/collection/index_test.go +++ b/pkg/collection/index_test.go @@ -24,6 +24,8 @@ import ( "strconv" "testing" + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -38,35 +40,35 @@ func TestIndex(t *testing.T) { logger := logging.New(io.Discard, 0) acc := account.New(logger) ai := acc.GetUserAccountInfo() - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) user := acc.GetAddress(account.UserAccountIndex) - + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) t.Run("create_index", func(t *testing.T) 
{ // create an index - err := collection.CreateIndex("pod1", "testdb_index_0", "key", collection.StringIndex, fd, user, mockClient, true) + err := collection.CreateIndex("pod1", "testdb_index_0", "key", podPassword, collection.StringIndex, fd, user, mockClient, true) if err != nil { t.Fatal(err) } // check if the index is created - if !isIndexPresent(t, "pod1", "testdb_index_0", "key", fd, user, mockClient) { + if !isIndexPresent(t, "pod1", "testdb_index_0", "key", podPassword, fd, user, mockClient) { t.Fatalf("index not found") } }) t.Run("create_and_open_index", func(t *testing.T) { // create an index - err := collection.CreateIndex("pod1", "testdb_index_1", "key", collection.StringIndex, fd, user, mockClient, true) + err := collection.CreateIndex("pod1", "testdb_index_1", "key", podPassword, collection.StringIndex, fd, user, mockClient, true) if err != nil { t.Fatal(err) } - //Open the index - _, err = collection.OpenIndex("pod1", "testdb_index_1", "key", fd, ai, user, mockClient, logger) + // Open the index + _, err = collection.OpenIndex("pod1", "testdb_index_1", "key", podPassword, fd, ai, user, mockClient, logger) if err != nil { t.Fatal(err) } @@ -74,11 +76,11 @@ func TestIndex(t *testing.T) { t.Run("close_and_open_index_from_another_machine", func(t *testing.T) { // create a DB and open it - index := createAndOpenIndex(t, "pod1", "testdb_index_2", collection.StringIndex, fd, user, mockClient, ai, logger) + index := createAndOpenIndex(t, "pod1", "testdb_index_2", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) kvMap := addLotOfDocs(t, index, mockClient) // open the index again, simulating like another instance - index1, err := collection.OpenIndex("pod1", "testdb_index_2", "key", fd, acc.GetUserAccountInfo(), acc.GetAddress(account.UserAccountIndex), mockClient, logger) + index1, err := collection.OpenIndex("pod1", "testdb_index_2", "key", podPassword, fd, acc.GetUserAccountInfo(), acc.GetAddress(account.UserAccountIndex), mockClient, logger) if err != nil { t.Fatal(err) } @@ -99,21 +101,21 @@ func TestIndex(t *testing.T) { t.Run("create_already_present_index", func(t *testing.T) { // create an index - err := collection.CreateIndex("pod1", "testdb_index_3", "key", collection.StringIndex, fd, user, mockClient, true) + err := collection.CreateIndex("pod1", "testdb_index_3", "key", podPassword, collection.StringIndex, fd, user, mockClient, true) if err != nil { t.Fatal(err) } // create an index - err = collection.CreateIndex("pod1", "testdb_index_3", "key", collection.StringIndex, fd, user, mockClient, true) + err = collection.CreateIndex("pod1", "testdb_index_3", "key", podPassword, collection.StringIndex, fd, user, mockClient, true) if !errors.Is(err, collection.ErrIndexAlreadyPresent) { t.Fatal(err) } }) t.Run("open_index_without_creating_it", func(t *testing.T) { - //Open the index - _, err = collection.OpenIndex("pod1", "testdb_index_4", "key", fd, ai, user, mockClient, logger) + // Open the index + _, err = collection.OpenIndex("pod1", "testdb_index_4", "key", podPassword, fd, ai, user, mockClient, logger) if err != collection.ErrIndexNotPresent { t.Fatal(err) } @@ -121,19 +123,19 @@ func TestIndex(t *testing.T) { t.Run("create_and_delete_index", func(t *testing.T) { // create an index - err := collection.CreateIndex("pod1", "testdb_index_5", "key", collection.StringIndex, fd, user, mockClient, true) + err := collection.CreateIndex("pod1", "testdb_index_5", "key", podPassword, collection.StringIndex, fd, user, mockClient, true) if err != nil { t.Fatal(err) 
} - //Open the index - idx, err := collection.OpenIndex("pod1", "testdb_index_5", "key", fd, ai, user, mockClient, logger) + // Open the index + idx, err := collection.OpenIndex("pod1", "testdb_index_5", "key", podPassword, fd, ai, user, mockClient, logger) if err != nil { t.Fatal(err) } // delete Index - err = idx.DeleteIndex() + err = idx.DeleteIndex(podPassword) if err != nil { t.Fatal(err) } @@ -141,21 +143,21 @@ func TestIndex(t *testing.T) { t.Run("delete_index_without_creating_it", func(t *testing.T) { // simulate index not present by creating and deleting it - err := collection.CreateIndex("pod1", "testdb_index_6", "key", collection.StringIndex, fd, user, mockClient, true) + err := collection.CreateIndex("pod1", "testdb_index_6", "key", podPassword, collection.StringIndex, fd, user, mockClient, true) if err != nil { t.Fatal(err) } - idx, err := collection.OpenIndex("pod1", "testdb_index_6", "key", fd, ai, user, mockClient, logger) + idx, err := collection.OpenIndex("pod1", "testdb_index_6", "key", podPassword, fd, ai, user, mockClient, logger) if err != nil { t.Fatal(err) } - err = idx.DeleteIndex() + err = idx.DeleteIndex(podPassword) if err != nil { t.Fatal(err) } // delete Index which is not present - err = idx.DeleteIndex() + err = idx.DeleteIndex(podPassword) if err != collection.ErrIndexNotPresent { t.Fatal(err) } @@ -163,12 +165,12 @@ func TestIndex(t *testing.T) { t.Run("count_docs", func(t *testing.T) { // create index and add some docs - err := collection.CreateIndex("pod1", "testdb_index_7", "key", collection.StringIndex, fd, user, mockClient, true) + err := collection.CreateIndex("pod1", "testdb_index_7", "key", podPassword, collection.StringIndex, fd, user, mockClient, true) if err != nil { t.Fatal(err) } - idx, err := collection.OpenIndex("pod1", "testdb_index_7", "key", fd, ai, user, mockClient, logger) + idx, err := collection.OpenIndex("pod1", "testdb_index_7", "key", podPassword, fd, ai, user, mockClient, logger) if err != nil { t.Fatal(err) } @@ -180,7 +182,7 @@ func TestIndex(t *testing.T) { } // count and check the count - count, err := idx.CountIndex() + count, err := idx.CountIndex(podPassword) if err != nil { t.Fatal(err) } @@ -193,10 +195,10 @@ func TestIndex(t *testing.T) { } -func isIndexPresent(t *testing.T, podName, collectionName, indexName string, fd *feed.API, user utils.Address, client blockstore.Client) bool { +func isIndexPresent(t *testing.T, podName, collectionName, indexName, encryptionPassword string, fd *feed.API, user utils.Address, client blockstore.Client) bool { actualIndexName := podName + collectionName + indexName topic := utils.HashString(actualIndexName) - _, addr, err := fd.GetFeedData(topic, user) + _, addr, err := fd.GetFeedData(topic, user, []byte(encryptionPassword)) if err == nil && len(addr) != 0 { data, _, err := client.DownloadBlob(addr) if err != nil { diff --git a/pkg/collection/iterator.go b/pkg/collection/iterator.go index 4d7ae886..8784c4e9 100644 --- a/pkg/collection/iterator.go +++ b/pkg/collection/iterator.go @@ -47,7 +47,7 @@ func (idx *Index) NewStringIterator(start, end string, limit int64) (*Iterator, var manifest *Manifest if idx.mutable { // get the first feed of the Index - mf, err := idx.loadManifest(idx.name) + mf, err := idx.loadManifest(idx.name, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return nil, ErrEmptyIndex } @@ -89,7 +89,7 @@ func (idx *Index) NewIntIterator(start, end, limit int64) (*Iterator, error) { var manifest *Manifest if idx.mutable { // get the first feed of the Index - mf, 
err := idx.loadManifest(idx.name) + mf, err := idx.loadManifest(idx.name, idx.encryptionPassword) if err != nil { // skipcq: TCV-001 return nil, ErrEmptyIndex } @@ -140,7 +140,7 @@ func (idx *Index) NewIntIterator(start, end, limit int64) (*Iterator, error) { func (itr *Iterator) Seek(key string) error { var manifest *Manifest if itr.index.mutable { - mf, err := itr.index.loadManifest(itr.index.name) + mf, err := itr.index.loadManifest(itr.index.name, itr.index.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -232,7 +232,7 @@ func (itr *Iterator) seekStringKey(manifest *Manifest, key string) error { var childManifest *Manifest if itr.index.mutable || entry.Manifest == nil { // now load the child Manifest and re-seek - cf, err := itr.index.loadManifest(manifest.Name + entry.Name) + cf, err := itr.index.loadManifest(manifest.Name+entry.Name, itr.index.encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -268,7 +268,7 @@ func (itr *Iterator) seekStringKey(manifest *Manifest, key string) error { var childManifest *Manifest if itr.index.mutable { // now load the child Manifest and re-seek - cf, err := itr.index.loadManifest(manifest.Name + entry.Name) + cf, err := itr.index.loadManifest(manifest.Name+entry.Name, itr.index.encryptionPassword) if err != nil { return err } @@ -361,7 +361,7 @@ func (itr *Iterator) nextStringKey() bool { if entry.EType == IntermediateEntry { var newManifest *Manifest if itr.index.mutable { - mf, err := itr.index.loadManifest(manifestState.currentManifest.Name + entry.Name) + mf, err := itr.index.loadManifest(manifestState.currentManifest.Name+entry.Name, itr.index.encryptionPassword) if err != nil { // skipcq: TCV-001 itr.error = err return false diff --git a/pkg/collection/iterator_test.go b/pkg/collection/iterator_test.go index f0fb899d..b03c883d 100644 --- a/pkg/collection/iterator_test.go +++ b/pkg/collection/iterator_test.go @@ -25,6 +25,8 @@ import ( "strconv" "testing" + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -39,16 +41,16 @@ func TestIndexIterator(t *testing.T) { logger := logging.New(io.Discard, 0) acc := account.New(logger) ai := acc.GetUserAccountInfo() - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) user := acc.GetAddress(account.UserAccountIndex) - + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) t.Run("iterate_all_string_keys", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_0", collection.StringIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_0", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -78,7 +80,7 @@ func TestIndexIterator(t *testing.T) { t.Run("iterate_all_random_string_keys", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_1", collection.StringIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_1", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -108,7 +110,7 @@ 
func TestIndexIterator(t *testing.T) { t.Run("iterate_with_string_end_key", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_2", collection.StringIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_2", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -143,7 +145,7 @@ func TestIndexIterator(t *testing.T) { t.Run("iterate_with_string_end_key", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_3", collection.StringIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_3", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) keys, values := addDocsForStringIteration(t, idx, actualCount) @@ -156,7 +158,7 @@ func TestIndexIterator(t *testing.T) { } // check the iteration is in order until the end key - //skip the first key since "0" is lexicographically smaller than "00" + // skip the first key since "0" is lexicographically smaller than "00" for i := 1; i < 14; i++ { if itr.Next() { key := sortedKeys[i] @@ -178,7 +180,7 @@ func TestIndexIterator(t *testing.T) { t.Run("iterate_with_string_keys_with_limit", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_4", collection.StringIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_4", podPassword, collection.StringIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -213,7 +215,7 @@ func TestIndexIterator(t *testing.T) { t.Run("iterate_all_number_keys", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_5", collection.NumberIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_5", podPassword, collection.NumberIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -242,7 +244,7 @@ func TestIndexIterator(t *testing.T) { t.Run("iterate_all_number_random_keys", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_6", collection.NumberIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_6", podPassword, collection.NumberIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -277,7 +279,7 @@ func TestIndexIterator(t *testing.T) { t.Run("iterate_with_numbers_end_key", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_7", collection.NumberIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_7", podPassword, collection.NumberIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -311,7 +313,7 @@ func TestIndexIterator(t *testing.T) { t.Run("iterate_with_numbers_keys_with_limit", func(t *testing.T) { // create a DB and open it - idx := createAndOpenIndex(t, "pod1", "testdb_iterator_8", collection.NumberIndex, fd, user, mockClient, ai, logger) + idx := createAndOpenIndex(t, "pod1", "testdb_iterator_8", podPassword, 
collection.NumberIndex, fd, user, mockClient, ai, logger) // add some documents and sort them lexicograpically actualCount := uint64(100) @@ -438,13 +440,13 @@ func sortLexicographically(t *testing.T, keys, values []string) ([]string, []str return keys, values } -func createAndOpenIndex(t *testing.T, podName, collectionName string, indexType collection.IndexType, fd *feed.API, user utils.Address, +func createAndOpenIndex(t *testing.T, podName, collectionName, podPassword string, indexType collection.IndexType, fd *feed.API, user utils.Address, client blockstore.Client, ai *account.Info, logger logging.Logger) *collection.Index { - err := collection.CreateIndex(podName, collectionName, "key", indexType, fd, user, client, true) + err := collection.CreateIndex(podName, collectionName, "key", podPassword, indexType, fd, user, client, true) if err != nil { t.Fatal(err) } - idx, err := collection.OpenIndex(podName, collectionName, "key", fd, ai, user, client, logger) + idx, err := collection.OpenIndex(podName, collectionName, "key", podPassword, fd, ai, user, client, logger) if err != nil { t.Fatal(err) } diff --git a/pkg/collection/kv.go b/pkg/collection/kv.go index 540fc833..14f04640 100644 --- a/pkg/collection/kv.go +++ b/pkg/collection/kv.go @@ -75,13 +75,13 @@ func NewKeyValueStore(podName string, fd *feed.API, ai *account.Info, user utils } // CreateKVTable creates the key value table with a given index type. -func (kv *KeyValue) CreateKVTable(name string, indexType IndexType) error { +func (kv *KeyValue) CreateKVTable(name, encryptionPassword string, indexType IndexType) error { if kv.fd.IsReadOnlyFeed() { // skipcq: TCV-001 return ErrReadOnlyIndex } // load the existing db's and see if this name is already there - kvtables, err := kv.LoadKVTables() + kvtables, err := kv.LoadKVTables(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -90,23 +90,23 @@ func (kv *KeyValue) CreateKVTable(name string, indexType IndexType) error { } // since this tables is not present already, create the index required for this table - err = CreateIndex(kv.podName, defaultCollectionName, name, indexType, kv.fd, kv.user, kv.client, true) + err = CreateIndex(kv.podName, defaultCollectionName, name, encryptionPassword, indexType, kv.fd, kv.user, kv.client, true) if err != nil { // skipcq: TCV-001 return err } // record the table as created kvtables[name] = []string{indexType.String()} - return kv.storeKVTables(kvtables) + return kv.storeKVTables(kvtables, encryptionPassword) } // DeleteKVTable deletes a given key value table with all it index and data entries. 
-func (kv *KeyValue) DeleteKVTable(name string) error { +func (kv *KeyValue) DeleteKVTable(name, encryptionPassword string) error { if kv.fd.IsReadOnlyFeed() { // skipcq: TCV-001 return ErrReadOnlyIndex } - kvtables, err := kv.LoadKVTables() + kvtables, err := kv.LoadKVTables(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -118,32 +118,32 @@ func (kv *KeyValue) DeleteKVTable(name string) error { kv.openKVTMu.Lock() defer kv.openKVTMu.Unlock() if table, ok := kv.openKVTables[name]; ok { - err = table.index.DeleteIndex() + err = table.index.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } delete(kv.openKVTables, name) } else { - idx, err := OpenIndex(kv.podName, defaultCollectionName, name, kv.fd, kv.ai, kv.user, kv.client, kv.logger) + idx, err := OpenIndex(kv.podName, defaultCollectionName, name, encryptionPassword, kv.fd, kv.ai, kv.user, kv.client, kv.logger) if err != nil { // skipcq: TCV-001 return err } - err = idx.DeleteIndex() + err = idx.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } } delete(kvtables, name) - return kv.storeKVTables(kvtables) + return kv.storeKVTables(kvtables, encryptionPassword) } // DeleteAllKVTables deletes all key value tables with all their index and data entries. -func (kv *KeyValue) DeleteAllKVTables() error { +func (kv *KeyValue) DeleteAllKVTables(encryptionPassword string) error { if kv.fd.IsReadOnlyFeed() { // skipcq: TCV-001 return ErrReadOnlyIndex } - kvtables, err := kv.LoadKVTables() + kvtables, err := kv.LoadKVTables(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -156,17 +156,17 @@ func (kv *KeyValue) DeleteAllKVTables() error { kv.openKVTMu.Lock() defer kv.openKVTMu.Unlock() if table, ok := kv.openKVTables[name]; ok { - err = table.index.DeleteIndex() + err = table.index.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } delete(kv.openKVTables, name) } else { - idx, err := OpenIndex(kv.podName, defaultCollectionName, name, kv.fd, kv.ai, kv.user, kv.client, kv.logger) + idx, err := OpenIndex(kv.podName, defaultCollectionName, name, encryptionPassword, kv.fd, kv.ai, kv.user, kv.client, kv.logger) if err != nil { // skipcq: TCV-001 return err } - err = idx.DeleteIndex() + err = idx.DeleteIndex(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -174,12 +174,12 @@ func (kv *KeyValue) DeleteAllKVTables() error { delete(kvtables, name) } - return kv.storeKVTables(kvtables) + return kv.storeKVTables(kvtables, encryptionPassword) } // OpenKVTable open a given key value table and loads the index. -func (kv *KeyValue) OpenKVTable(name string) error { - kvtables, err := kv.LoadKVTables() +func (kv *KeyValue) OpenKVTable(name, encryptionPassword string) error { + kvtables, err := kv.LoadKVTables(encryptionPassword) if err != nil { // skipcq: TCV-001 return err } @@ -189,7 +189,7 @@ func (kv *KeyValue) OpenKVTable(name string) error { } idxType := toIndexTypeEnum(values[0]) - idx, err := OpenIndex(kv.podName, defaultCollectionName, name, kv.fd, kv.ai, kv.user, kv.client, kv.logger) + idx, err := OpenIndex(kv.podName, defaultCollectionName, name, encryptionPassword, kv.fd, kv.ai, kv.user, kv.client, kv.logger) if err != nil { // skipcq: TCV-001 return err } @@ -213,11 +213,11 @@ func (kv *KeyValue) OpenKVTable(name string) error { } // KVCount counts the number of entries in the given key value table. 
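The key-value store gets the same treatment: the table list and every table's index are now read and written with the pod password. An illustrative sketch (not part of this patch) of the table lifecycle, assuming a `*collection.KeyValue` built with `NewKeyValueStore` and a pod password generated as in the kv tests of this change; the table name is arbitrary:

```
// kvTableLifecycle is a hypothetical helper showing the reworked KV-table API.
func kvTableLifecycle(kvStore *collection.KeyValue, podPassword string) error {
	if err := kvStore.CreateKVTable("kv_demo", podPassword, collection.StringIndex); err != nil {
		return err
	}
	if err := kvStore.OpenKVTable("kv_demo", podPassword); err != nil {
		return err
	}
	// the stored table list is loaded with the same password
	if _, err := kvStore.LoadKVTables(podPassword); err != nil {
		return err
	}
	return kvStore.DeleteKVTable("kv_demo", podPassword)
}
```

Note that `KVCount` only needs the password to open the index when the table is not already open; for a table that is open it reuses the password stored on its index.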
-func (kv *KeyValue) KVCount(name string) (*TableKeyCount, error) { +func (kv *KeyValue) KVCount(name, encryptionPassword string) (*TableKeyCount, error) { kv.openKVTMu.Lock() defer kv.openKVTMu.Unlock() if table, ok := kv.openKVTables[name]; ok { - count, err := table.index.CountIndex() + count, err := table.index.CountIndex(table.index.encryptionPassword) if err != nil { return nil, err } @@ -226,11 +226,11 @@ func (kv *KeyValue) KVCount(name string) (*TableKeyCount, error) { TableName: name, }, nil } else { - idx, err := OpenIndex(kv.podName, defaultCollectionName, name, kv.fd, kv.ai, kv.user, kv.client, kv.logger) + idx, err := OpenIndex(kv.podName, defaultCollectionName, name, encryptionPassword, kv.fd, kv.ai, kv.user, kv.client, kv.logger) if err != nil { return nil, err } - count, err := idx.CountIndex() + count, err := idx.CountIndex(idx.encryptionPassword) if err != nil { return nil, err } @@ -406,10 +406,10 @@ func (kv *KeyValue) KVGetNext(name string) ([]string, string, []byte, error) { } // LoadKVTables Loads the list of KV tables. -func (kv *KeyValue) LoadKVTables() (map[string][]string, error) { +func (kv *KeyValue) LoadKVTables(encryptionPassword string) (map[string][]string, error) { collections := make(map[string][]string) topic := utils.HashString(kvFile) - _, data, err := kv.fd.GetFeedData(topic, kv.user) + _, data, err := kv.fd.GetFeedData(topic, kv.user, []byte(encryptionPassword)) if err != nil { if err.Error() != "feed does not exist or was not updated yet" { // skipcq: TCV-001 return collections, err @@ -433,7 +433,7 @@ func (kv *KeyValue) LoadKVTables() (map[string][]string, error) { return collections, nil } -func (kv *KeyValue) storeKVTables(collections map[string][]string) error { +func (kv *KeyValue) storeKVTables(collections map[string][]string, encryptionPassword string) error { buf := bytes.NewBuffer(nil) collectionLen := len(collections) if collectionLen > 0 { @@ -448,7 +448,7 @@ func (kv *KeyValue) storeKVTables(collections map[string][]string) error { if buf.Len() == 0 { data = []byte(utils.DeletedFeedMagicWord) } - _, err := kv.fd.UpdateFeed(topic, kv.user, data) + _, err := kv.fd.UpdateFeed(topic, kv.user, data, []byte(encryptionPassword)) if err != nil { // skipcq: TCV-001 return err } diff --git a/pkg/collection/kv_test.go b/pkg/collection/kv_test.go index eef4c573..55599e7f 100644 --- a/pkg/collection/kv_test.go +++ b/pkg/collection/kv_test.go @@ -28,6 +28,8 @@ import ( "strings" "testing" + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/collection" @@ -41,21 +43,21 @@ func TestKeyValueStore(t *testing.T) { logger := logging.New(io.Discard, 0) acc := account.New(logger) ai := acc.GetUserAccountInfo() - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) user := acc.GetAddress(account.UserAccountIndex) kvStore := collection.NewKeyValueStore("pod1", fd, ai, user, mockClient, logger) - + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) t.Run("table_not_opened", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1314", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_1314", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.CreateKVTable("kv_table_1314", collection.StringIndex) + err = 
kvStore.CreateKVTable("kv_table_1314", podPassword, collection.StringIndex) if !errors.Is(err, collection.ErrKvTableAlreadyPresent) { t.Fatal("table should be already present") } @@ -64,23 +66,23 @@ func TestKeyValueStore(t *testing.T) { if !errors.Is(err, collection.ErrKVTableNotOpened) { t.Fatal("open table") } - err = kvStore.OpenKVTable("kv_table_1314") + err = kvStore.OpenKVTable("kv_table_1314", podPassword) if err != nil { t.Fatal(err) } // delete so that they dont show up in other testcases - err = kvStore.DeleteKVTable("kv_table_1314") + err = kvStore.DeleteKVTable("kv_table_1314", podPassword) if err != nil { t.Fatal(err) } }) t.Run("nil_itr", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1312", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_1312", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_1312") + err = kvStore.OpenKVTable("kv_table_1312", podPassword) if err != nil { t.Fatal(err) } @@ -91,19 +93,19 @@ func TestKeyValueStore(t *testing.T) { } // delete so that they dont show up in other testcases - err = kvStore.DeleteKVTable("kv_table_1312") + err = kvStore.DeleteKVTable("kv_table_1312", podPassword) if err != nil { t.Fatal(err) } }) t.Run("create_kv_table_with_string_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_0", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_0", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - tables, err := kvStore.LoadKVTables() + tables, err := kvStore.LoadKVTables(podPassword) if err != nil { t.Fatal(err) } @@ -118,19 +120,19 @@ func TestKeyValueStore(t *testing.T) { } // delete so that they dont show up in other testcases - err = kvStore.DeleteKVTable("kv_table_0") + err = kvStore.DeleteKVTable("kv_table_0", podPassword) if err != nil { t.Fatal(err) } }) t.Run("create_kv_table_with_number_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1", collection.NumberIndex) + err := kvStore.CreateKVTable("kv_table_1", podPassword, collection.NumberIndex) if err != nil { t.Fatal(err) } - tables, err := kvStore.LoadKVTables() + tables, err := kvStore.LoadKVTables(podPassword) if err != nil { t.Fatal(err) } @@ -145,24 +147,24 @@ func TestKeyValueStore(t *testing.T) { } // delete so that they dont show up in other testcases - err = kvStore.DeleteKVTable("kv_table_1") + err = kvStore.DeleteKVTable("kv_table_1", podPassword) if err != nil { t.Fatal(err) } }) t.Run("check_delete", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_2", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_2", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.DeleteKVTable("kv_table_2") + err = kvStore.DeleteKVTable("kv_table_2", podPassword) if err != nil { t.Fatal(err) } - tables, err := kvStore.LoadKVTables() + tables, err := kvStore.LoadKVTables(podPassword) if err != nil { t.Fatal(err) } @@ -174,20 +176,20 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("create_multiple_kv_tables_and_delete", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_31", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_31", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.CreateKVTable("kv_table_32", collection.NumberIndex) + err = kvStore.CreateKVTable("kv_table_32", podPassword, collection.NumberIndex) if err != nil { t.Fatal(err) } - err = kvStore.CreateKVTable("kv_table_33", collection.StringIndex) + err 
= kvStore.CreateKVTable("kv_table_33", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - tables, err := kvStore.LoadKVTables() + tables, err := kvStore.LoadKVTables(podPassword) if err != nil { t.Fatal(err) } @@ -222,12 +224,12 @@ func TestKeyValueStore(t *testing.T) { } // delete the last table - err = kvStore.DeleteKVTable("kv_table_33") + err = kvStore.DeleteKVTable("kv_table_33", podPassword) if err != nil { t.Fatal(err) } - tables, err = kvStore.LoadKVTables() + tables, err = kvStore.LoadKVTables(podPassword) if err != nil { t.Fatal(err) } @@ -254,19 +256,19 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("create_open_and_delete", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_4", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_4", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } // open the table - err = kvStore.OpenKVTable("kv_table_4") + err = kvStore.OpenKVTable("kv_table_4", podPassword) if err != nil { t.Fatal(err) } // delete the opened table - err = kvStore.DeleteKVTable("kv_table_4") + err = kvStore.DeleteKVTable("kv_table_4", podPassword) if err != nil { t.Fatal(err) } @@ -275,18 +277,18 @@ func TestKeyValueStore(t *testing.T) { t.Run("delete_without_create", func(t *testing.T) { // delete the last table - err = kvStore.DeleteKVTable("kv_table_5") + err = kvStore.DeleteKVTable("kv_table_5", podPassword) if !errors.Is(err, collection.ErrKVTableNotPresent) { t.Fatal("was able to delete table without creating it") } }) t.Run("open_table", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_6", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_6", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_6") + err = kvStore.OpenKVTable("kv_table_6", podPassword) if err != nil { t.Fatal(err) } @@ -299,18 +301,18 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("open_without_create", func(t *testing.T) { - err = kvStore.OpenKVTable("kv_table_7") + err = kvStore.OpenKVTable("kv_table_7", podPassword) if !errors.Is(err, collection.ErrKVTableNotPresent) { t.Fatal("was able to open table without creating it") } }) t.Run("put_string_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_8", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_8", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_8") + err = kvStore.OpenKVTable("kv_table_8", podPassword) if err != nil { t.Fatal(err) } @@ -331,7 +333,7 @@ func TestKeyValueStore(t *testing.T) { t.Fatal(err) } - countObject, err := kvStore.KVCount("kv_table_8") + countObject, err := kvStore.KVCount("kv_table_8", podPassword) if err != nil { t.Fatal(err) } @@ -341,7 +343,7 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("put_bytes_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_bytes", collection.BytesIndex) + err := kvStore.CreateKVTable("kv_table_bytes", podPassword, collection.BytesIndex) if err != nil { t.Fatal(err) } @@ -349,7 +351,7 @@ func TestKeyValueStore(t *testing.T) { if !errors.Is(err, collection.ErrKVTableNotOpened) { t.Fatal("kv table open") } - err = kvStore.OpenKVTable("kv_table_bytes") + err = kvStore.OpenKVTable("kv_table_bytes", podPassword) if err != nil { t.Fatal(err) } @@ -370,7 +372,7 @@ func TestKeyValueStore(t *testing.T) { t.Fatal(err) } - countObject, err := kvStore.KVCount("kv_table_bytes") + countObject, err := kvStore.KVCount("kv_table_bytes", podPassword) 
if err != nil { t.Fatal(err) } @@ -380,11 +382,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("put_chinese_string_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_9", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_9", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_9") + err = kvStore.OpenKVTable("kv_table_9", podPassword) if err != nil { t.Fatal(err) } @@ -407,11 +409,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("put_string_in_number_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_10", collection.NumberIndex) + err := kvStore.CreateKVTable("kv_table_10", podPassword, collection.NumberIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_10") + err = kvStore.OpenKVTable("kv_table_10", podPassword) if err != nil { t.Fatal(err) } @@ -422,11 +424,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("put_get_del_get_string_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_11", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_11", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_11") + err = kvStore.OpenKVTable("kv_table_11", podPassword) if err != nil { t.Fatal(err) } @@ -466,7 +468,7 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("put_without_opening_table", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_12", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_12", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } @@ -477,11 +479,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("delete_non_existent_string_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_13", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_13", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_13") + err = kvStore.OpenKVTable("kv_table_13", podPassword) if err != nil { t.Fatal(err) } @@ -498,7 +500,7 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("batch_without_open", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_batch_1", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_batch_1", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } @@ -510,11 +512,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("batch_columns_and_get_values", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_batch_2", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_batch_2", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_batch_2") + err = kvStore.OpenKVTable("kv_table_batch_2", podPassword) if err != nil { t.Fatal(err) } @@ -555,11 +557,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("batch_put_columns_and_get_values", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_batch_9", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_batch_9", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_batch_9") + err = kvStore.OpenKVTable("kv_table_batch_9", podPassword) if err != nil { t.Fatal(err) } @@ -605,11 +607,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("count_columns_and_get_values", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_batch_count", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_batch_count", 
podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - countObject, err := kvStore.KVCount("kv_table_batch_count") + countObject, err := kvStore.KVCount("kv_table_batch_count", podPassword) if err != nil { t.Fatal(err) } @@ -619,11 +621,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("Iterate_string_keys", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_Itr_0", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_Itr_0", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_Itr_0") + err = kvStore.OpenKVTable("kv_table_Itr_0", podPassword) if err != nil { t.Fatal(err) } @@ -655,11 +657,11 @@ func TestKeyValueStore(t *testing.T) { tableNo := 0 research: tableNo++ - err := kvStore.CreateKVTable(fmt.Sprintf("kv_table_Itr_01%d", tableNo), collection.StringIndex) + err := kvStore.CreateKVTable(fmt.Sprintf("kv_table_Itr_01%d", tableNo), podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable(fmt.Sprintf("kv_table_Itr_01%d", tableNo)) + err = kvStore.OpenKVTable(fmt.Sprintf("kv_table_Itr_01%d", tableNo), podPassword) if err != nil { t.Fatal(err) } @@ -708,11 +710,11 @@ func TestKeyValueStore(t *testing.T) { tableNo := 0 research: tableNo++ - err := kvStore.CreateKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo), collection.StringIndex) + err := kvStore.CreateKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo), podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo)) + err = kvStore.OpenKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo), podPassword) if err != nil { t.Fatal(err) } @@ -776,11 +778,11 @@ func TestKeyValueStore(t *testing.T) { t.Run("Iterate_seek_start_end_string_keys_over_a_known_failing_keys", func(t *testing.T) { tableNo := 486 - err := kvStore.CreateKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo), collection.StringIndex) + err := kvStore.CreateKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo), podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo)) + err = kvStore.OpenKVTable(fmt.Sprintf("kv_table_Itr_1%d", tableNo), podPassword) if err != nil { t.Fatal(err) } @@ -853,11 +855,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("Iterate_string_of_numbers_keys", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_Itr_3", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_Itr_3", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_Itr_3") + err = kvStore.OpenKVTable("kv_table_Itr_3", podPassword) if err != nil { t.Fatal(err) } @@ -886,11 +888,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("Iterate_numbers_keys", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_Itr_4", collection.NumberIndex) + err := kvStore.CreateKVTable("kv_table_Itr_4", podPassword, collection.NumberIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_Itr_4") + err = kvStore.OpenKVTable("kv_table_Itr_4", podPassword) if err != nil { t.Fatal(err) } @@ -920,11 +922,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("Iterate_numbers_start_end_keys", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_Itr_5", collection.NumberIndex) + err := kvStore.CreateKVTable("kv_table_Itr_5", podPassword, collection.NumberIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_Itr_5") 
+ err = kvStore.OpenKVTable("kv_table_Itr_5", podPassword) if err != nil { t.Fatal(err) } @@ -973,11 +975,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("Iterate_numbers_start_and_limit_keys", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_Itr_6", collection.NumberIndex) + err := kvStore.CreateKVTable("kv_table_Itr_6", podPassword, collection.NumberIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_Itr_6") + err = kvStore.OpenKVTable("kv_table_Itr_6", podPassword) if err != nil { t.Fatal(err) } @@ -1020,11 +1022,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("get_non_existent_string_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1313", collection.StringIndex) + err := kvStore.CreateKVTable("kv_table_1313", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_1313") + err = kvStore.OpenKVTable("kv_table_1313", podPassword) if err != nil { t.Fatal(err) } @@ -1050,11 +1052,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("err_byte_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1316", collection.BytesIndex) + err := kvStore.CreateKVTable("kv_table_1316", podPassword, collection.BytesIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_1316") + err = kvStore.OpenKVTable("kv_table_1316", podPassword) if err != nil { t.Fatal(err) } @@ -1070,11 +1072,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("err_seek_list_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1317", collection.ListIndex) + err := kvStore.CreateKVTable("kv_table_1317", podPassword, collection.ListIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_1317") + err = kvStore.OpenKVTable("kv_table_1317", podPassword) if err != nil { t.Fatal(err) } @@ -1086,11 +1088,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("err_seek_map_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1318", collection.MapIndex) + err := kvStore.CreateKVTable("kv_table_1318", podPassword, collection.MapIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_1318") + err = kvStore.OpenKVTable("kv_table_1318", podPassword) if err != nil { t.Fatal(err) } @@ -1102,11 +1104,11 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("err_seek_invalid_index", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1319", collection.InvalidIndex) + err := kvStore.CreateKVTable("kv_table_1319", podPassword, collection.InvalidIndex) if err != nil { t.Fatal(err) } - err = kvStore.OpenKVTable("kv_table_1319") + err = kvStore.OpenKVTable("kv_table_1319", podPassword) if err != nil { t.Fatal(err) } @@ -1118,7 +1120,7 @@ func TestKeyValueStore(t *testing.T) { }) t.Run("seek_unopened_table", func(t *testing.T) { - err := kvStore.CreateKVTable("kv_table_1320", collection.ListIndex) + err := kvStore.CreateKVTable("kv_table_1320", podPassword, collection.ListIndex) if err != nil { t.Fatal(err) } diff --git a/pkg/dfs/api.go b/pkg/dfs/api.go index 4c24caf9..c4fa4892 100644 --- a/pkg/dfs/api.go +++ b/pkg/dfs/api.go @@ -17,7 +17,12 @@ limitations under the License. 
package dfs import ( + "context" "errors" + "io" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee" @@ -27,12 +32,18 @@ import ( "github.com/fairdatasociety/fairOS-dfs/pkg/user" ) +const ( + defaultMaxWorkers = 100 +) + // API is the go api for fairOS type API struct { client blockstore.Client users *user.Users logger logging.Logger dataDir string + tm *taskmanager.TaskManager + io.Closer } // NewDfsAPI is the main entry point for the df controller. @@ -49,11 +60,13 @@ func NewDfsAPI(dataDir, apiUrl, postageBlockId string, isGatewayProxy bool, ensC return nil, ErrBeeClient } users := user.NewUsers(dataDir, c, ens, logger) + return &API{ client: c, users: users, logger: logger, dataDir: dataDir, + tm: taskmanager.New(1, defaultMaxWorkers, time.Second*15, logger), }, nil } @@ -64,5 +77,13 @@ func NewMockDfsAPI(client blockstore.Client, users *user.Users, logger logging.L users: users, logger: logger, dataDir: dataDir, + tm: taskmanager.New(1, 100, time.Second*15, logger), } } + +// Close stops the taskmanager +func (a *API) Close() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + return a.tm.Stop(ctx) +} diff --git a/pkg/dfs/doc_api.go b/pkg/dfs/doc_api.go index d7b9e721..caf885e9 100644 --- a/pkg/dfs/doc_api.go +++ b/pkg/dfs/doc_api.go @@ -19,9 +19,9 @@ package dfs import "github.com/fairdatasociety/fairOS-dfs/pkg/collection" // DocCreate is a controller function which does all the checks before creating a documentDB. -func (d *API) DocCreate(sessionId, podName, name string, indexes map[string]collection.IndexType, mutable bool) error { +func (a *API) DocCreate(sessionId, podName, name string, indexes map[string]collection.IndexType, mutable bool) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -31,18 +31,18 @@ func (d *API) DocCreate(sessionId, podName, name string, indexes map[string]coll return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } - return podInfo.GetDocStore().CreateDocumentDB(name, indexes, mutable) + return podInfo.GetDocStore().CreateDocumentDB(name, podInfo.GetPodPassword(), indexes, mutable) } // DocOpen is a controller function which does all the checks before opening a documentDB. -func (d *API) DocOpen(sessionId, podName, name string) error { +func (a *API) DocOpen(sessionId, podName, name string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -52,18 +52,18 @@ func (d *API) DocOpen(sessionId, podName, name string) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } - return podInfo.GetDocStore().OpenDocumentDB(name) + return podInfo.GetDocStore().OpenDocumentDB(name, podInfo.GetPodPassword()) } // DocDelete is a controller function which does all the checks before deleting a documentDB. 
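Because the API now owns a task manager, callers are expected to close it on shutdown. A minimal sketch, assuming `dfsApi` was obtained from `NewDfsAPI` elsewhere:

```go
package example

import (
	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
	"github.com/fairdatasociety/fairOS-dfs/pkg/logging"
)

// shutdown stops the API's internal task manager; Close waits up to a minute
// for queued tasks before giving up.
func shutdown(dfsApi *dfs.API, logger logging.Logger) {
	if err := dfsApi.Close(); err != nil {
		logger.Errorf("dfs api close: %v", err)
	}
}
```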
-func (d *API) DocDelete(sessionId, podName, name string) error { +func (a *API) DocDelete(sessionId, podName, name string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -73,19 +73,19 @@ func (d *API) DocDelete(sessionId, podName, name string) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } - return podInfo.GetDocStore().DeleteDocumentDB(name) + return podInfo.GetDocStore().DeleteDocumentDB(name, podInfo.GetPodPassword()) } // DocList is a controller function which does all the checks before listing all the // documentDB available in the pod. -func (d *API) DocList(sessionId, podName string) (map[string]collection.DBSchema, error) { +func (a *API) DocList(sessionId, podName string) (map[string]collection.DBSchema, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -95,20 +95,20 @@ func (d *API) DocList(sessionId, podName string) (map[string]collection.DBSchema return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } - return podInfo.GetDocStore().LoadDocumentDBSchemas() + return podInfo.GetDocStore().LoadDocumentDBSchemas(podInfo.GetPodPassword()) } // DocCount is a controller function which does all the checks before counting // all the documents ina documentDB. -func (d *API) DocCount(sessionId, podName, name, expr string) (*collection.TableKeyCount, error) { +func (a *API) DocCount(sessionId, podName, name, expr string) (*collection.TableKeyCount, error) { keyCount := &collection.TableKeyCount{TableName: name} // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return keyCount, ErrUserNotLoggedIn } @@ -118,7 +118,7 @@ func (d *API) DocCount(sessionId, podName, name, expr string) (*collection.Table return keyCount, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return keyCount, err } @@ -133,9 +133,9 @@ func (d *API) DocCount(sessionId, podName, name, expr string) (*collection.Table // DocPut is a controller function which does all the checks before inserting // a document in the documentDB. -func (d *API) DocPut(sessionId, podName, name string, value []byte) error { +func (a *API) DocPut(sessionId, podName, name string, value []byte) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -145,7 +145,7 @@ func (d *API) DocPut(sessionId, podName, name string, value []byte) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -154,10 +154,10 @@ func (d *API) DocPut(sessionId, podName, name string, value []byte) error { } // DocGet is a controller function which does all the checks before retrieving -//// a document in the documentDB. 
-func (d *API) DocGet(sessionId, podName, name, id string) ([]byte, error) { +// // a document in the documentDB. +func (a *API) DocGet(sessionId, podName, name, id string) ([]byte, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -167,19 +167,19 @@ func (d *API) DocGet(sessionId, podName, name, id string) ([]byte, error) { return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } - return podInfo.GetDocStore().Get(name, id) + return podInfo.GetDocStore().Get(name, id, podInfo.GetPodPassword()) } // DocDel is a controller function which does all the checks before deleting // a documentDB. -func (d *API) DocDel(sessionId, podName, name, id string) error { +func (a *API) DocDel(sessionId, podName, name, id string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -189,7 +189,7 @@ func (d *API) DocDel(sessionId, podName, name, id string) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -199,9 +199,9 @@ func (d *API) DocDel(sessionId, podName, name, id string) error { // DocFind is a controller function which does all the checks before finding // records from a documentDB. -func (d *API) DocFind(sessionId, podName, name, expr string, limit int) ([][]byte, error) { +func (a *API) DocFind(sessionId, podName, name, expr string, limit int) ([][]byte, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -211,18 +211,18 @@ func (d *API) DocFind(sessionId, podName, name, expr string, limit int) ([][]byt return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } - return podInfo.GetDocStore().Find(name, expr, limit) + return podInfo.GetDocStore().Find(name, expr, podInfo.GetPodPassword(), limit) } // DocBatch initiates a batch inserting session. -func (d *API) DocBatch(sessionId, podName, name string) (*collection.DocBatch, error) { +func (a *API) DocBatch(sessionId, podName, name string) (*collection.DocBatch, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -232,18 +232,18 @@ func (d *API) DocBatch(sessionId, podName, name string) (*collection.DocBatch, e return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } - return podInfo.GetDocStore().CreateDocBatch(name) + return podInfo.GetDocStore().CreateDocBatch(name, podInfo.GetPodPassword()) } // DocBatchPut inserts records in to a document batch. 
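A sketch of the document-store calls from the caller's side: the pod password is resolved internally from the open pod, so only the session id and pod/table names are passed. The `api`, `sessionId`, pod name and the `id` field used as the default index are illustrative assumptions.

```go
package example

import (
	"fmt"

	"github.com/fairdatasociety/fairOS-dfs/pkg/collection"
	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// docQuickstart creates, opens and exercises a document DB through the dfs API.
func docQuickstart(api *dfs.API, sessionId string) error {
	indexes := map[string]collection.IndexType{"first_name": collection.StringIndex}
	if err := api.DocCreate(sessionId, "enron", "employees", indexes, true); err != nil {
		return err
	}
	if err := api.DocOpen(sessionId, "enron", "employees"); err != nil {
		return err
	}
	// assumes "id" is the primary index field of the document DB
	if err := api.DocPut(sessionId, "enron", "employees", []byte(`{"id":"1","first_name":"Kay"}`)); err != nil {
		return err
	}
	doc, err := api.DocGet(sessionId, "enron", "employees", "1")
	if err != nil {
		return err
	}
	fmt.Printf("document: %s\n", doc)
	return nil
}
```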
-func (d *API) DocBatchPut(sessionId, podName string, doc []byte, docBatch *collection.DocBatch) error { +func (a *API) DocBatchPut(sessionId, podName string, doc []byte, docBatch *collection.DocBatch) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -253,7 +253,7 @@ func (d *API) DocBatchPut(sessionId, podName string, doc []byte, docBatch *colle return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -262,9 +262,9 @@ func (d *API) DocBatchPut(sessionId, podName string, doc []byte, docBatch *colle } // DocBatchWrite commits the batch document insert. -func (d *API) DocBatchWrite(sessionId, podName string, docBatch *collection.DocBatch) error { +func (a *API) DocBatchWrite(sessionId, podName string, docBatch *collection.DocBatch) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -274,7 +274,7 @@ func (d *API) DocBatchWrite(sessionId, podName string, docBatch *collection.DocB return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -283,9 +283,9 @@ func (d *API) DocBatchWrite(sessionId, podName string, docBatch *collection.DocB } // DocIndexJson indexes a json files in to the document DB. -func (d *API) DocIndexJson(sessionId, podName, name, podFileWithPath string) error { +func (a *API) DocIndexJson(sessionId, podName, name, podFileWithPath string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -296,7 +296,7 @@ func (d *API) DocIndexJson(sessionId, podName, name, podFileWithPath string) err } // check if file present - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -305,5 +305,5 @@ func (d *API) DocIndexJson(sessionId, podName, name, podFileWithPath string) err return ErrFileNotPresent } - return podInfo.GetDocStore().DocFileIndex(name, podFileWithPath) + return podInfo.GetDocStore().DocFileIndex(name, podFileWithPath, podInfo.GetPodPassword()) } diff --git a/pkg/dfs/errors.go b/pkg/dfs/errors.go index 124cf780..f6bcd80b 100644 --- a/pkg/dfs/errors.go +++ b/pkg/dfs/errors.go @@ -24,7 +24,8 @@ var ( // ErrPodNotOpen indicates pod is not open ErrPodNotOpen = errors.New("pod not open") // ErrFileNotPresent indicates file is not present - ErrFileNotPresent = errors.New("file not present") + ErrFileNotPresent = errors.New("file not present") + ErrFileAlreadyPresent = errors.New("file already exist with new name") errPodAlreadyOpen = errors.New("pod already open") ErrBeeClient = errors.New("could not connect to bee client") diff --git a/pkg/dfs/fs_api.go b/pkg/dfs/fs_api.go index d68d4a3f..61690809 100644 --- a/pkg/dfs/fs_api.go +++ b/pkg/dfs/fs_api.go @@ -30,9 +30,9 @@ import ( // Mkdir is a controller function which validates if the user is logged in, // pod is open and calls the make directory function in the dir object. 
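The batch APIs follow the same pattern; a short sketch of the flow (create a batch handle, push raw JSON documents, commit once), with pod and table names illustrative:

```go
package example

import "github.com/fairdatasociety/fairOS-dfs/pkg/dfs"

// docBatchInsert inserts many documents with a single commit at the end.
func docBatchInsert(api *dfs.API, sessionId string, docs [][]byte) error {
	batch, err := api.DocBatch(sessionId, "enron", "employees")
	if err != nil {
		return err
	}
	for _, doc := range docs {
		if err := api.DocBatchPut(sessionId, "enron", doc, batch); err != nil {
			return err
		}
	}
	return api.DocBatchWrite(sessionId, "enron", batch)
}
```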
-func (d *API) Mkdir(podName, dirToCreateWithPath, sessionId string) error { +func (a *API) Mkdir(podName, dirToCreateWithPath, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -43,23 +43,42 @@ func (d *API) Mkdir(podName, dirToCreateWithPath, sessionId string) error { } // get the dir object and make directory - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, podPassword, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } directory := podInfo.GetDirectory() - err = directory.MkDir(dirToCreateWithPath) + return directory.MkDir(dirToCreateWithPath, podPassword) +} + +// RenameDir is a controller function which validates if the user is logged in, +// pod is open and calls the rename directory function in the dir object. +func (a *API) RenameDir(podName, dirToRenameWithPath, newName, sessionId string) error { + // get the logged in user information + ui := a.users.GetLoggedInUserInfo(sessionId) + if ui == nil { + return ErrUserNotLoggedIn + } + + // check if pod open + if !ui.IsPodOpen(podName) { + return ErrPodNotOpen + } + + // get the dir object and rename directory + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } - return nil + directory := podInfo.GetDirectory() + return directory.RenameDir(dirToRenameWithPath, newName, podInfo.GetPodPassword()) } // IsDirPresent is acontroller function which validates if the user is logged in, // pod is open and calls the dir object to check if the directory is present. -func (d *API) IsDirPresent(podName, directoryNameWithPath, sessionId string) (bool, error) { +func (a *API) IsDirPresent(podName, directoryNameWithPath, sessionId string) (bool, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return false, ErrUserNotLoggedIn } @@ -70,21 +89,21 @@ func (d *API) IsDirPresent(podName, directoryNameWithPath, sessionId string) (bo } // get pod Info - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, podPassword, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return false, err } directory := podInfo.GetDirectory() - - dirPresent := directory.IsDirectoryPresent(directoryNameWithPath) + directoryNameWithPath = filepath.ToSlash(directoryNameWithPath) + dirPresent := directory.IsDirectoryPresent(directoryNameWithPath, podPassword) return dirPresent, nil } // RmDir is a controller function which validates if the user is logged in, // pod is open and calls the dir object to remove the supplied directory. 
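A sketch of the directory calls, including the new `RenameDir`; paths are pod-rooted and normalised to slash form internally. The pod name and paths are illustrative, and the exact semantics of the new-name argument follow `RenameDir` above.

```go
package example

import (
	"fmt"

	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// dirOps creates a directory, renames it and checks that it exists.
func dirOps(api *dfs.API, sessionId string) error {
	if err := api.Mkdir("enron", "/reports", sessionId); err != nil {
		return err
	}
	if err := api.RenameDir("enron", "/reports", "/reports-2016", sessionId); err != nil {
		return err
	}
	present, err := api.IsDirPresent("enron", "/reports-2016", sessionId)
	if err != nil {
		return err
	}
	fmt.Println("present:", present)
	return nil
}
```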
-func (d *API) RmDir(podName, directoryNameWithPath, sessionId string) error { +func (a *API) RmDir(podName, directoryNameWithPath, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -95,23 +114,19 @@ func (d *API) RmDir(podName, directoryNameWithPath, sessionId string) error { } // get the dir object and remove directory - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } directory := podInfo.GetDirectory() - err = directory.RmDir(directoryNameWithPath) - if err != nil { - return err - } - return nil + return directory.RmDir(directoryNameWithPath, podInfo.GetPodPassword()) } // ListDir is a controller function which validates if the user is logged in, // pod is open and calls the dir object to list the contents of the supplied directory. -func (d *API) ListDir(podName, currentDir, sessionId string) ([]dir.Entry, []f.Entry, error) { +func (a *API) ListDir(podName, currentDir, sessionId string) ([]dir.Entry, []f.Entry, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, nil, ErrUserNotLoggedIn } @@ -122,7 +137,7 @@ func (d *API) ListDir(podName, currentDir, sessionId string) ([]dir.Entry, []f.E } // get the dir object and list directory - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, nil, err } @@ -133,12 +148,12 @@ func (d *API) ListDir(podName, currentDir, sessionId string) ([]dir.Entry, []f.E if directory.GetDirFromDirectoryMap(totalPath) == nil { return nil, nil, dir.ErrDirectoryNotPresent } - dEntries, fileList, err := directory.ListDir(currentDir) + dEntries, fileList, err := directory.ListDir(currentDir, podInfo.GetPodPassword()) if err != nil { return nil, nil, err } file := podInfo.GetFile() - fEntries, err := file.ListFiles(fileList) + fEntries, err := file.ListFiles(fileList, podInfo.GetPodPassword()) if err != nil { return nil, nil, err } @@ -147,9 +162,9 @@ func (d *API) ListDir(podName, currentDir, sessionId string) ([]dir.Entry, []f.E // DirectoryStat is a controller function which validates if the user is logged in, // pod is open and calls the dir object to get the information about the given directory. 
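`ListDir` returns directory and file entries separately, both decrypted with the pod password under the hood. A sketch, assuming the `Entry` types expose a `Name` field:

```go
package example

import (
	"fmt"

	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// listPodDir prints the immediate children of a pod directory.
func listPodDir(api *dfs.API, sessionId string) error {
	dirs, files, err := api.ListDir("enron", "/", sessionId)
	if err != nil {
		return err
	}
	for _, d := range dirs {
		fmt.Println("dir :", d.Name) // Name is assumed to be the entry's field
	}
	for _, f := range files {
		fmt.Println("file:", f.Name)
	}
	return nil
}
```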
-func (d *API) DirectoryStat(podName, directoryName, sessionId string) (*dir.Stats, error) { +func (a *API) DirectoryStat(podName, directoryName, sessionId string) (*dir.Stats, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -160,24 +175,52 @@ func (d *API) DirectoryStat(podName, directoryName, sessionId string) (*dir.Stat } // get the dir object and stat directory - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } directory := podInfo.GetDirectory() - ds, err := directory.DirStat(podName, directoryName) + ds, err := directory.DirStat(podName, podInfo.GetPodPassword(), directoryName) if err != nil { return nil, err } return ds, nil } +// DirectoryInode is a controller function which validates if the user is logged in, +// pod is open and calls the dir object to get the inode info about the given directory. +func (a *API) DirectoryInode(podName, directoryName, sessionId string) (*dir.Inode, error) { + // get the logged in user information + ui := a.users.GetLoggedInUserInfo(sessionId) + if ui == nil { + return nil, ErrUserNotLoggedIn + } + + // check if pod open + if !ui.IsPodOpen(podName) { + return nil, ErrPodNotOpen + } + + // get the dir object and stat directory + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) + if err != nil { + return nil, err + } + directory := podInfo.GetDirectory() + inode := directory.GetDirFromDirectoryMap(directoryName) + if inode == nil { + a.logger.Errorf("dir not found: %s", directoryName) + return nil, fmt.Errorf("dir not found") + } + return inode, nil +} + // DeleteFile is a controller function which validates if the user is logged in, // pod is open and delete the file. It also remove the file entry from the parent // directory. -func (d *API) DeleteFile(podName, podFileWithPath, sessionId string) error { +func (a *API) DeleteFile(podName, podFileWithPath, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -187,7 +230,7 @@ func (d *API) DeleteFile(podName, podFileWithPath, sessionId string) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -199,7 +242,7 @@ func (d *API) DeleteFile(podName, podFileWithPath, sessionId string) error { directory := podInfo.GetDirectory() file := podInfo.GetFile() - err = file.RmFile(podFileWithPath) + err = file.RmFile(podFileWithPath, podInfo.GetPodPassword()) if err != nil { if err == f.ErrDeletedFeed { return pod.ErrInvalidFile @@ -210,14 +253,14 @@ func (d *API) DeleteFile(podName, podFileWithPath, sessionId string) error { // update the directory by removing the file from it fileDir := filepath.Dir(podFileWithPath) fileName := filepath.Base(podFileWithPath) - return directory.RemoveEntryFromDir(fileDir, fileName, true) + return directory.RemoveEntryFromDir(fileDir, podInfo.GetPodPassword(), fileName, true) } // FileStat is a controller function which validates if the user is logged in, // pod is open and gets the information about the file. 
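`DirectoryStat` and `DeleteFile` keep their shape apart from the receiver rename; `DeleteFile` also drops the entry from the parent directory's metadata, as shown above. A sketch with illustrative paths:

```go
package example

import (
	"fmt"

	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// statAndDelete stats a directory and removes one file beneath it.
func statAndDelete(api *dfs.API, sessionId string) error {
	stats, err := api.DirectoryStat("enron", "/reports-2016", sessionId)
	if err != nil {
		return err
	}
	fmt.Printf("dir stats: %+v\n", stats)
	return api.DeleteFile("enron", "/reports-2016/q1.csv", sessionId)
}
```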
-func (d *API) FileStat(podName, podFileWithPath, sessionId string) (*f.Stats, error) { +func (a *API) FileStat(podName, podFileWithPath, sessionId string) (*f.Stats, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -227,12 +270,12 @@ func (d *API) FileStat(podName, podFileWithPath, sessionId string) (*f.Stats, er return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } file := podInfo.GetFile() - ds, err := file.GetStats(podName, podFileWithPath) + ds, err := file.GetStats(podName, podFileWithPath, podInfo.GetPodPassword()) if err != nil { return nil, err } @@ -242,9 +285,9 @@ func (d *API) FileStat(podName, podFileWithPath, sessionId string) (*f.Stats, er // UploadFile is a controller function which validates if the user is logged in, // // pod is open and calls the upload function. -func (d *API) UploadFile(podName, podFileName, sessionId string, fileSize int64, fd io.Reader, podPath, compression string, blockSize uint32, overwrite bool) error { +func (a *API) UploadFile(podName, podFileName, sessionId string, fileSize int64, fd io.Reader, podPath, compression string, blockSize uint32, overwrite bool) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -254,48 +297,158 @@ func (d *API) UploadFile(podName, podFileName, sessionId string, fileSize int64, return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } file := podInfo.GetFile() directory := podInfo.GetDirectory() + podPath = filepath.ToSlash(podPath) // check if file exists, then backup the file totalPath := utils.CombinePathAndFile(podPath, podFileName) alreadyPresent := file.IsFileAlreadyPresent(totalPath) if alreadyPresent && !overwrite { - m, err := file.BackupFromFileName(totalPath) + m, err := file.BackupFromFileName(totalPath, podInfo.GetPodPassword()) if err != nil { return err } - err = directory.AddEntryToDir(podPath, m.Name, true) + err = directory.AddEntryToDir(podPath, podInfo.GetPodPassword(), m.Name, true) if err != nil { return err } - err = directory.RemoveEntryFromDir(podPath, podFileName, true) + err = directory.RemoveEntryFromDir(podPath, podInfo.GetPodPassword(), podFileName, true) if err != nil { return err } - alreadyPresent = false } - err = file.Upload(fd, podFileName, fileSize, blockSize, podPath, compression) + err = file.Upload(fd, podFileName, fileSize, blockSize, podPath, compression, podInfo.GetPodPassword()) if err != nil { return err } + // add the file to the directory metadata - if !alreadyPresent { - return directory.AddEntryToDir(podPath, podFileName, true) + return directory.AddEntryToDir(podPath, podInfo.GetPodPassword(), podFileName, true) +} + +// RenameFile is a controller function which validates if the user is logged in, +// +// pod is open and calls renaming of a file +func (a *API) RenameFile(podName, fileNameWithPath, newFileNameWithPath, sessionId string) error { + // get the logged in user information + ui := a.users.GetLoggedInUserInfo(sessionId) + if ui == nil { + return ErrUserNotLoggedIn } - return nil + + // check if pod open + if !ui.IsPodOpen(podName) { + return 
ErrPodNotOpen + } + + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) + if err != nil { + return err + } + file := podInfo.GetFile() + directory := podInfo.GetDirectory() + + fileNameWithPath = filepath.ToSlash(fileNameWithPath) + newFileNameWithPath = filepath.ToSlash(newFileNameWithPath) + + // check if file exists + if !file.IsFileAlreadyPresent(fileNameWithPath) { + return ErrFileNotPresent + } + if file.IsFileAlreadyPresent(newFileNameWithPath) { + return ErrFileAlreadyPresent + } + + m, err := file.RenameFromFileName(fileNameWithPath, newFileNameWithPath, podInfo.GetPodPassword()) + if err != nil { + return err + } + oldPrnt := filepath.ToSlash(filepath.Dir(fileNameWithPath)) + newPrnt := filepath.ToSlash(filepath.Dir(newFileNameWithPath)) + + // add the file to the directory metadata + err = directory.AddEntryToDir(newPrnt, podInfo.GetPodPassword(), m.Name, true) + if err != nil { + return err + } + + return directory.RemoveEntryFromDir(oldPrnt, podInfo.GetPodPassword(), filepath.Base(fileNameWithPath), true) } // DownloadFile is a controller function which validates if the user is logged in, // pod is open and calls the download function. -func (d *API) DownloadFile(podName, podFileWithPath, sessionId string) (io.ReadCloser, uint64, error) { +func (a *API) DownloadFile(podName, podFileWithPath, sessionId string) (io.ReadCloser, uint64, error) { + // get the logged in user information + ui := a.users.GetLoggedInUserInfo(sessionId) + if ui == nil { + return nil, 0, ErrUserNotLoggedIn + } + + // check if pod open + if !ui.IsPodOpen(podName) { + return nil, 0, ErrPodNotOpen + } + + // check if logged in to pod + if !ui.GetPod().IsPodOpened(podName) { + return nil, 0, fmt.Errorf("login to pod to do this operation") + } + + // get podInfo and construct the path + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) + if err != nil { + return nil, 0, err + } + + // download the file by creating the reader + file := podInfo.GetFile() + reader, size, err := file.Download(podFileWithPath, podInfo.GetPodPassword()) + if err != nil { + return nil, 0, err + } + return reader, size, nil +} + +// WriteAtFile is a controller function which writes a file from a given offset +// +// pod is open and calls writeAt of a file +func (a *API) WriteAtFile(podName, fileNameWithPath, sessionId string, update io.Reader, offset uint64, truncate bool) (int, error) { + // get the logged in user information + ui := a.users.GetLoggedInUserInfo(sessionId) + if ui == nil { + return 0, ErrUserNotLoggedIn + } + + // check if pod open + if !ui.IsPodOpen(podName) { + return 0, ErrPodNotOpen + } + + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) + if err != nil { + return 0, err + } + file := podInfo.GetFile() + fileNameWithPath = filepath.ToSlash(fileNameWithPath) + // check if file exists + if !file.IsFileAlreadyPresent(fileNameWithPath) { + return 0, ErrFileNotPresent + } + + return file.WriteAt(fileNameWithPath, podInfo.GetPodPassword(), update, offset, truncate) +} + +// ReadSeekCloser is a controller function which validates if the user is logged in, +// pod is open and calls the download function. 
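A sketch of the two new file operations: `RenameFile` moves the file's metadata between parent directories, and `WriteAtFile` patches bytes from a given offset (`truncate=false` keeps the rest of the file). Pod name and paths are illustrative.

```go
package example

import (
	"fmt"
	"strings"

	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// renameAndPatch renames a pod file and then overwrites its first bytes.
func renameAndPatch(api *dfs.API, sessionId string) error {
	if err := api.RenameFile("enron", "/reports-2016/q1.csv", "/reports-2016/q1-final.csv", sessionId); err != nil {
		return err
	}
	patch := strings.NewReader("updated,row\n")
	n, err := api.WriteAtFile("enron", "/reports-2016/q1-final.csv", sessionId, patch, 0, false)
	if err != nil {
		return err
	}
	fmt.Println("bytes written:", n)
	return nil
}
```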
+func (a *API) ReadSeekCloser(podName, podFileWithPath, sessionId string) (io.ReadSeekCloser, uint64, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, 0, ErrUserNotLoggedIn } @@ -311,14 +464,14 @@ func (d *API) DownloadFile(podName, podFileWithPath, sessionId string) (io.ReadC } // get podInfo and construct the path - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, 0, err } // download the file by creating the reader file := podInfo.GetFile() - reader, size, err := file.Download(podFileWithPath) + reader, size, err := file.ReadSeeker(podFileWithPath, podInfo.GetPodPassword()) if err != nil { return nil, 0, err } @@ -327,9 +480,9 @@ func (d *API) DownloadFile(podName, podFileWithPath, sessionId string) (io.ReadC // ShareFile is a controller function which validates if the user is logged in, // pod is open and calls the sharefile function. -func (d *API) ShareFile(podName, podFileWithPath, destinationUser, sessionId string) (string, error) { +func (a *API) ShareFile(podName, podFileWithPath, destinationUser, sessionId string) (string, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return "", ErrUserNotLoggedIn } @@ -340,12 +493,12 @@ func (d *API) ShareFile(podName, podFileWithPath, destinationUser, sessionId str } // get podInfo and construct the path - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return "", err } - sharingRef, err := d.users.ShareFileWithUser(podName, podFileWithPath, destinationUser, ui, ui.GetPod(), podInfo.GetAccountInfo().GetAddress()) + sharingRef, err := a.users.ShareFileWithUser(podName, podInfo.GetPodPassword(), podFileWithPath, destinationUser, ui, ui.GetPod(), podInfo.GetAccountInfo().GetAddress()) if err != nil { return "", err } @@ -355,9 +508,9 @@ func (d *API) ShareFile(podName, podFileWithPath, destinationUser, sessionId str // ReceiveFile is a controller function which validates if the user is logged in, // pod is open and calls the ReceiveFile function to get the shared file in to the // given pod. -func (d *API) ReceiveFile(podName, sessionId string, sharingRef utils.SharingReference, dir string) (string, error) { +func (a *API) ReceiveFile(podName, sessionId string, sharingRef utils.SharingReference, dir string) (string, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return "", ErrUserNotLoggedIn } @@ -367,23 +520,18 @@ func (d *API) ReceiveFile(podName, sessionId string, sharingRef utils.SharingRef return "", ErrPodNotOpen } - return d.users.ReceiveFileFromUser(podName, sharingRef, ui, ui.GetPod(), dir) + return a.users.ReceiveFileFromUser(podName, sharingRef, ui, ui.GetPod(), dir) } // ReceiveInfo is a controller function which validates if the user is logged in, -// pod is open and calls the ReceiveInfo function to display the shared files +// calls the ReceiveInfo function to display the shared files // information. 
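Sharing is unchanged in shape, but `ReceiveInfo` no longer needs a pod name since it only reads the sharing metadata. A sketch, where `ref` would typically be parsed from the reference string returned by `ShareFile`; pod names and paths are illustrative.

```go
package example

import (
	"fmt"

	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
	"github.com/fairdatasociety/fairOS-dfs/pkg/utils"
)

// shareWithUser shares a pod file and returns the sharing reference string.
func shareWithUser(api *dfs.API, sessionId, destUser string) (string, error) {
	return api.ShareFile("enron", "/reports-2016/q1-final.csv", destUser, sessionId)
}

// receiveSharedFile inspects the shared file and saves it into /inbox.
func receiveSharedFile(api *dfs.API, sessionId string, ref utils.SharingReference) error {
	info, err := api.ReceiveInfo(sessionId, ref) // no pod name needed any more
	if err != nil {
		return err
	}
	fmt.Printf("shared file: %+v\n", info)
	_, err = api.ReceiveFile("enron", sessionId, ref, "/inbox")
	return err
}
```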
-func (d *API) ReceiveInfo(podName, sessionId string, sharingRef utils.SharingReference) (*user.ReceiveFileInfo, error) { +func (a *API) ReceiveInfo(sessionId string, sharingRef utils.SharingReference) (*user.ReceiveFileInfo, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } - // check if pod open - if !ui.IsPodOpen(podName) { - return nil, ErrPodNotOpen - } - - return d.users.ReceiveFileInfo(sharingRef) + return a.users.ReceiveFileInfo(sharingRef) } diff --git a/pkg/dfs/kv_api.go b/pkg/dfs/kv_api.go index 51c94e8c..3a1877aa 100644 --- a/pkg/dfs/kv_api.go +++ b/pkg/dfs/kv_api.go @@ -21,9 +21,9 @@ import ( ) // KVCreate does validation checks and calls the create KVtable function. -func (d *API) KVCreate(sessionId, podName, name string, indexType collection.IndexType) error { +func (a *API) KVCreate(sessionId, podName, name string, indexType collection.IndexType) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -33,18 +33,18 @@ func (d *API) KVCreate(sessionId, podName, name string, indexType collection.Ind return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } - return podInfo.GetKVStore().CreateKVTable(name, indexType) + return podInfo.GetKVStore().CreateKVTable(name, podInfo.GetPodPassword(), indexType) } // KVDelete does validation checks and calls the delete KVtable function. -func (d *API) KVDelete(sessionId, podName, name string) error { +func (a *API) KVDelete(sessionId, podName, name string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -54,18 +54,18 @@ func (d *API) KVDelete(sessionId, podName, name string) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } - return podInfo.GetKVStore().DeleteKVTable(name) + return podInfo.GetKVStore().DeleteKVTable(name, podInfo.GetPodPassword()) } // KVOpen does validation checks and calls the open KVtable function. -func (d *API) KVOpen(sessionId, podName, name string) error { +func (a *API) KVOpen(sessionId, podName, name string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -75,18 +75,18 @@ func (d *API) KVOpen(sessionId, podName, name string) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } - return podInfo.GetKVStore().OpenKVTable(name) + return podInfo.GetKVStore().OpenKVTable(name, podInfo.GetPodPassword()) } // KVList does validation checks and calls the list KVtable function. 
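At the dfs layer the KV calls keep their public shape; the pod password is looked up from the open pod. A short sketch of creating and opening a table, with names illustrative:

```go
package example

import (
	"github.com/fairdatasociety/fairOS-dfs/pkg/collection"
	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// kvSetup creates a string-indexed table and opens it for use.
func kvSetup(api *dfs.API, sessionId string) error {
	if err := api.KVCreate(sessionId, "enron", "employees", collection.StringIndex); err != nil {
		return err
	}
	return api.KVOpen(sessionId, "enron", "employees")
}
```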
-func (d *API) KVList(sessionId, podName string) (map[string][]string, error) { +func (a *API) KVList(sessionId, podName string) (map[string][]string, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -95,18 +95,18 @@ func (d *API) KVList(sessionId, podName string) (map[string][]string, error) { if !ui.IsPodOpen(podName) { return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } - return podInfo.GetKVStore().LoadKVTables() + return podInfo.GetKVStore().LoadKVTables(podInfo.GetPodPassword()) } // KVCount does validation checks and calls the count KVtable function. -func (d *API) KVCount(sessionId, podName, name string) (*collection.TableKeyCount, error) { +func (a *API) KVCount(sessionId, podName, name string) (*collection.TableKeyCount, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -116,18 +116,18 @@ func (d *API) KVCount(sessionId, podName, name string) (*collection.TableKeyCoun return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } - return podInfo.GetKVStore().KVCount(name) + return podInfo.GetKVStore().KVCount(name, podInfo.GetPodPassword()) } // KVPut does validation checks and calls the put KVtable function. -func (d *API) KVPut(sessionId, podName, name, key string, value []byte) error { +func (a *API) KVPut(sessionId, podName, name, key string, value []byte) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -137,7 +137,7 @@ func (d *API) KVPut(sessionId, podName, name, key string, value []byte) error { return ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -146,9 +146,9 @@ func (d *API) KVPut(sessionId, podName, name, key string, value []byte) error { } // KVGet does validation checks and calls the get KVtable function. -func (d *API) KVGet(sessionId, podName, name, key string) ([]string, []byte, error) { +func (a *API) KVGet(sessionId, podName, name, key string) ([]string, []byte, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, nil, ErrUserNotLoggedIn } @@ -158,7 +158,7 @@ func (d *API) KVGet(sessionId, podName, name, key string) ([]string, []byte, err return nil, nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, nil, err } @@ -167,9 +167,9 @@ func (d *API) KVGet(sessionId, podName, name, key string) ([]string, []byte, err } // KVDel does validation checks and calls the delete KVtable function. 
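Put, get and count follow directly; a sketch of a round trip on the table opened above:

```go
package example

import (
	"fmt"

	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// kvRoundTrip writes one key, reads it back and prints the table size.
func kvRoundTrip(api *dfs.API, sessionId string) error {
	if err := api.KVPut(sessionId, "enron", "employees", "emp-1", []byte(`{"name":"kay"}`)); err != nil {
		return err
	}
	_, value, err := api.KVGet(sessionId, "enron", "employees", "emp-1")
	if err != nil {
		return err
	}
	fmt.Printf("value: %s\n", value)
	count, err := api.KVCount(sessionId, "enron", "employees")
	if err != nil {
		return err
	}
	fmt.Printf("count: %+v\n", count)
	return nil
}
```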
-func (d *API) KVDel(sessionId, podName, name, key string) ([]byte, error) { +func (a *API) KVDel(sessionId, podName, name, key string) ([]byte, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -178,7 +178,7 @@ func (d *API) KVDel(sessionId, podName, name, key string) ([]byte, error) { if !ui.IsPodOpen(podName) { return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } @@ -187,9 +187,9 @@ func (d *API) KVDel(sessionId, podName, name, key string) ([]byte, error) { } // KVBatch does validation checks and calls the batch KVtable function. -func (d *API) KVBatch(sessionId, podName, name string, columns []string) (*collection.Batch, error) { +func (a *API) KVBatch(sessionId, podName, name string, columns []string) (*collection.Batch, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -199,7 +199,7 @@ func (d *API) KVBatch(sessionId, podName, name string, columns []string) (*colle return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } @@ -208,9 +208,9 @@ func (d *API) KVBatch(sessionId, podName, name string, columns []string) (*colle } // KVBatchPut does validation checks and calls the batch put KVtable function. -func (d *API) KVBatchPut(sessionId, podName, key string, value []byte, batch *collection.Batch) error { +func (a *API) KVBatchPut(sessionId, podName, key string, value []byte, batch *collection.Batch) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -224,9 +224,9 @@ func (d *API) KVBatchPut(sessionId, podName, key string, value []byte, batch *co } // KVBatchWrite does validation checks and calls the batch write KVtable function. -func (d *API) KVBatchWrite(sessionId, podName string, batch *collection.Batch) error { +func (a *API) KVBatchWrite(sessionId, podName string, batch *collection.Batch) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -241,9 +241,9 @@ func (d *API) KVBatchWrite(sessionId, podName string, batch *collection.Batch) e } // KVSeek does validation checks and calls the seek KVtable function. 
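As an aside from the diff: every KV handler above now resolves the pod's own password via GetPodInfoFromPodMap and forwards it to the collection layer, while the public dfs.API methods keep their sessionId/podName shape. A minimal caller-side sketch of that surface, with dfsAPI, sessionId, podName and idx assumed to come from earlier setup (a logged-in user with the pod already open) and the table/key names as placeholders; this is illustrative only, not code from the diff.

```
package example // illustrative sketch, not part of the fairOS-dfs source tree

import (
	"github.com/fairdatasociety/fairOS-dfs/pkg/collection"
	"github.com/fairdatasociety/fairOS-dfs/pkg/dfs"
)

// kvRoundTrip exercises KVCreate/KVOpen/KVPut/KVGet with the signatures shown
// above; the calls fail with ErrUserNotLoggedIn or ErrPodNotOpen if the session
// or pod preconditions do not hold.
func kvRoundTrip(dfsAPI *dfs.API, sessionId, podName string, idx collection.IndexType) error {
	if err := dfsAPI.KVCreate(sessionId, podName, "table1", idx); err != nil {
		return err
	}
	if err := dfsAPI.KVOpen(sessionId, podName, "table1"); err != nil {
		return err
	}
	if err := dfsAPI.KVPut(sessionId, podName, "table1", "key1", []byte("value1")); err != nil {
		return err
	}
	_, value, err := dfsAPI.KVGet(sessionId, podName, "table1", "key1")
	if err != nil {
		return err
	}
	_ = value // raw bytes stored under "key1"
	return nil
}
```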
-func (d *API) KVSeek(sessionId, podName, name, start, end string, limit int64) (*collection.Iterator, error) { +func (a *API) KVSeek(sessionId, podName, name, start, end string, limit int64) (*collection.Iterator, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -253,7 +253,7 @@ func (d *API) KVSeek(sessionId, podName, name, start, end string, limit int64) ( return nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, err } @@ -262,9 +262,9 @@ func (d *API) KVSeek(sessionId, podName, name, start, end string, limit int64) ( } // KVGetNext does validation checks and calls the get next KVtable function. -func (d *API) KVGetNext(sessionId, podName, name string) ([]string, string, []byte, error) { +func (a *API) KVGetNext(sessionId, podName, name string) ([]string, string, []byte, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, "", nil, ErrUserNotLoggedIn } @@ -274,7 +274,7 @@ func (d *API) KVGetNext(sessionId, podName, name string) ([]string, string, []by return nil, "", nil, ErrPodNotOpen } - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return nil, "", nil, err } diff --git a/pkg/dfs/pod_api.go b/pkg/dfs/pod_api.go index 5b1da247..88bd4d4e 100644 --- a/pkg/dfs/pod_api.go +++ b/pkg/dfs/pod_api.go @@ -17,32 +17,35 @@ limitations under the License. package dfs import ( - "fmt" + "context" + "encoding/hex" "github.com/fairdatasociety/fairOS-dfs/pkg/pod" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) -func (d *API) CreatePod(podName, passPhrase, sessionId string) (*pod.Info, error) { +func (a *API) CreatePod(podName, sessionId string) (*pod.Info, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } + podPasswordBytes, _ := utils.GetRandBytes(pod.PodPasswordLength) + podPassword := hex.EncodeToString(podPasswordBytes) // create the pod - _, err := ui.GetPod().CreatePod(podName, passPhrase, "") + _, err := ui.GetPod().CreatePod(podName, "", podPassword) if err != nil { return nil, err } // open the pod - pi, err := ui.GetPod().OpenPod(podName, passPhrase) + pi, err := ui.GetPod().OpenPod(podName) if err != nil { return nil, err } // create the root directory - err = pi.GetDirectory().MkRootDir(pi.GetPodName(), pi.GetPodAddress(), pi.GetFeed()) + err = pi.GetDirectory().MkRootDir(pi.GetPodName(), podPassword, pi.GetPodAddress(), pi.GetFeed()) if err != nil { return nil, err } @@ -53,22 +56,16 @@ func (d *API) CreatePod(podName, passPhrase, sessionId string) (*pod.Info, error } // DeletePod deletes a pod -func (d *API) DeletePod(podName, passphrase, sessionId string) error { +func (a *API) DeletePod(podName, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } - // check for valid password - acc := ui.GetAccount() - if !acc.Authorise(passphrase) { - return fmt.Errorf("invalid password") - } - // delete all the directory, files, and database tables under 
this pod from // the Swarm network. - podInfo, err := ui.GetPod().GetPodInfoFromPodMap(podName) + podInfo, _, err := ui.GetPod().GetPodInfoFromPodMap(podName) if err != nil { return err } @@ -90,7 +87,7 @@ func (d *API) DeletePod(podName, passphrase, sessionId string) error { return nil } - err = directory.RmRootDir() + err = directory.RmRootDir(podInfo.GetPodPassword()) if err != nil { return err } @@ -110,38 +107,57 @@ func (d *API) DeletePod(podName, passphrase, sessionId string) error { return nil } -func (d *API) OpenPod(podName, passPhrase, sessionId string) (*pod.Info, error) { +func (a *API) OpenPod(podName, sessionId string) (*pod.Info, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } - // return if pod already open if ui.IsPodOpen(podName) { return nil, errPodAlreadyOpen } - // open the pod - pi, err := ui.GetPod().OpenPod(podName, passPhrase) + pi, err := ui.GetPod().OpenPod(podName) if err != nil { return nil, err } - - err = pi.GetDirectory().AddRootDir(pi.GetPodName(), pi.GetPodAddress(), pi.GetFeed()) + err = pi.GetDirectory().AddRootDir(pi.GetPodName(), pi.GetPodPassword(), pi.GetPodAddress(), pi.GetFeed()) if err != nil { return nil, err } - // Add podName in the login user session ui.AddPodName(podName, pi) + return pi, nil +} +func (a *API) OpenPodAsync(ctx context.Context, podName, sessionId string) (*pod.Info, error) { + // get the logged-in user information + ui := a.users.GetLoggedInUserInfo(sessionId) + if ui == nil { + return nil, ErrUserNotLoggedIn + } + // return if pod already open + if ui.IsPodOpen(podName) { + return nil, errPodAlreadyOpen + } + // open the pod + pi, err := ui.GetPod().OpenPodAsync(ctx, podName) + if err != nil { + return nil, err + } + err = pi.GetDirectory().AddRootDir(pi.GetPodName(), pi.GetPodPassword(), pi.GetPodAddress(), pi.GetFeed()) + if err != nil { + return nil, err + } + // Add podName in the login user session + ui.AddPodName(podName, pi) return pi, nil } -func (d *API) ClosePod(podName, sessionId string) error { +func (a *API) ClosePod(podName, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -162,9 +178,9 @@ func (d *API) ClosePod(podName, sessionId string) error { return nil } -func (d *API) PodStat(podName, sessionId string) (*pod.Stat, error) { +func (a *API) PodStat(podName, sessionId string) (*pod.Stat, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -177,9 +193,9 @@ func (d *API) PodStat(podName, sessionId string) (*pod.Stat, error) { return podStat, nil } -func (d *API) SyncPod(podName, sessionId string) error { +func (a *API) SyncPod(podName, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } @@ -197,9 +213,9 @@ func (d *API) SyncPod(podName, sessionId string) error { return nil } -func (d *API) ListPods(sessionId string) ([]string, []string, error) { +func (a *API) ListPods(sessionId string) ([]string, []string, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := 
a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, nil, ErrUserNotLoggedIn } @@ -212,24 +228,36 @@ func (d *API) ListPods(sessionId string) ([]string, []string, error) { return pods, sharedPods, nil } -func (d *API) PodShare(podName, sharedPodName, passPhrase, sessionId string) (string, error) { +// PodList lists all available pods in json format +func (a *API) PodList(sessionId string) (*pod.PodList, error) { + // get the logged in user information + ui := a.users.GetLoggedInUserInfo(sessionId) + if ui == nil { + return nil, ErrUserNotLoggedIn + } + + // list pods of a user + return ui.GetPod().PodList() +} + +func (a *API) PodShare(podName, sharedPodName, sessionId string) (string, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return "", ErrUserNotLoggedIn } // get the pod stat - address, err := ui.GetPod().PodShare(podName, sharedPodName, passPhrase) + address, err := ui.GetPod().PodShare(podName, sharedPodName) if err != nil { return "", err } return address, nil } -func (d *API) PodReceiveInfo(sessionId string, ref utils.Reference) (*pod.ShareInfo, error) { +func (a *API) PodReceiveInfo(sessionId string, ref utils.Reference) (*pod.ShareInfo, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -237,9 +265,9 @@ func (d *API) PodReceiveInfo(sessionId string, ref utils.Reference) (*pod.ShareI return ui.GetPod().ReceivePodInfo(ref) } -func (d *API) PodReceive(sessionId, sharedPodName string, ref utils.Reference) (*pod.Info, error) { +func (a *API) PodReceive(sessionId, sharedPodName string, ref utils.Reference) (*pod.Info, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } @@ -247,8 +275,8 @@ func (d *API) PodReceive(sessionId, sharedPodName string, ref utils.Reference) ( return ui.GetPod().ReceivePod(sharedPodName, ref) } -func (d *API) IsPodExist(podName, sessionId string) bool { - ui := d.users.GetLoggedInUserInfo(sessionId) +func (a *API) IsPodExist(podName, sessionId string) bool { + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return false } diff --git a/pkg/dfs/user_api.go b/pkg/dfs/user_api.go index a7a8f6c5..af0264d6 100644 --- a/pkg/dfs/user_api.go +++ b/pkg/dfs/user_api.go @@ -21,103 +21,67 @@ import ( ) // CreateUserV2 is a controller function which calls the create user function from the user object. -func (d *API) CreateUserV2(userName, passPhrase, mnemonic, sessionId string) (string, string, string, string, *user.Info, error) { - return d.users.CreateNewUserV2(userName, passPhrase, mnemonic, sessionId) +func (a *API) CreateUserV2(userName, passPhrase, mnemonic, sessionId string) (string, string, string, string, *user.Info, error) { + return a.users.CreateNewUserV2(userName, passPhrase, mnemonic, sessionId, a.tm) } // LoginUserV2 is a controller function which calls the users login function. -func (d *API) LoginUserV2(userName, passPhrase, sessionId string) (*user.Info, string, string, error) { - return d.users.LoginUserV2(userName, passPhrase, d.client, sessionId) -} - -// CreateUser is a controller function which calls the create user function from the user object. 
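The pod calls above no longer take a passphrase: CreatePod now derives a random per-pod password internally (hex-encoded random bytes) and passes it down to the pod layer, so callers supply only the pod name and session id. A hedged lifecycle sketch under those assumptions — dfsAPI and sessionId come from earlier setup, the pod names are placeholders, and this is not code from the diff.

```
package example // illustrative sketch only

import "github.com/fairdatasociety/fairOS-dfs/pkg/dfs"

func podLifecycle(dfsAPI *dfs.API, sessionId string) error {
	// CreatePod opens the new pod as part of creation, as shown above.
	if _, err := dfsAPI.CreatePod("photos", sessionId); err != nil {
		return err
	}
	// Share the pod under a different name; the returned reference is what a
	// receiving user would pass to PodReceive.
	ref, err := dfsAPI.PodShare("photos", "sharedPhotos", sessionId)
	if err != nil {
		return err
	}
	_ = ref
	return dfsAPI.ClosePod("photos", sessionId)
}
```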
-// FOR MIGRATION PURPOSE ONLY -func (d *API) CreateUser(userName, passPhrase, mnemonic, sessionId string) (string, string, *user.Info, error) { - return d.users.CreateNewUser(userName, passPhrase, mnemonic, sessionId) -} - -// LoginUser is a controller function which calls the users login function. -// FOR MIGRATION PURPOSE ONLY -func (d *API) LoginUser(userName, passPhrase, sessionId string) (*user.Info, error) { - return d.users.LoginUser(userName, passPhrase, d.dataDir, d.client, sessionId) +func (a *API) LoginUserV2(userName, passPhrase, sessionId string) (*user.Info, string, string, error) { + return a.users.LoginUserV2(userName, passPhrase, a.client, a.tm, sessionId) } // LogoutUser is a controller function which gets the logged in user information and logs it out. -func (d *API) LogoutUser(sessionId string) error { - // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) - if ui == nil { - return ErrUserNotLoggedIn - } - - return d.users.LogoutUser(ui.GetUserName(), sessionId) -} - -// DeleteUser is a controller function which deletes a logged in user. -func (d *API) DeleteUser(passPhrase, sessionId string) error { +func (a *API) LogoutUser(sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } - return d.users.DeleteUser(ui.GetUserName(), d.dataDir, passPhrase, sessionId, ui) + return a.users.LogoutUser(ui.GetUserName(), sessionId) } // DeleteUserV2 is a controller function which deletes a logged in user. -func (d *API) DeleteUserV2(passPhrase, sessionId string) error { +func (a *API) DeleteUserV2(passPhrase, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } - return d.users.DeleteUserV2(ui.GetUserName(), passPhrase, sessionId, ui) -} - -// IsUserNameAvailable checks if a given user name is available in this dfs server. -func (d *API) IsUserNameAvailable(userName string) bool { - return d.users.IsUsernameAvailable(userName, d.dataDir) + return a.users.DeleteUserV2(ui.GetUserName(), passPhrase, sessionId, ui) } // IsUserNameAvailableV2 checks if a given user name is available in this dfs server. -func (d *API) IsUserNameAvailableV2(userName string) bool { - return d.users.IsUsernameAvailableV2(userName) +func (a *API) IsUserNameAvailableV2(userName string) bool { + return a.users.IsUsernameAvailableV2(userName) } // IsUserLoggedIn checks if the given user is logged in -func (d *API) IsUserLoggedIn(userName string) bool { +func (a *API) IsUserLoggedIn(userName string) bool { // check if a given user is logged in - return d.users.IsUserNameLoggedIn(userName) + return a.users.IsUserNameLoggedIn(userName) } // GetUserStat gets the information related to the user. -func (d *API) GetUserStat(sessionId string) (*user.Stat, error) { +func (a *API) GetUserStat(sessionId string) (*user.Stat, error) { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return nil, ErrUserNotLoggedIn } - return d.users.GetUserStat(ui) -} - -// ExportUser exports the currently logged in user. 
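With the migration-only V1 user endpoints dropped above, a client is left with the V2 flow. A hedged sketch of the surviving calls; dfsAPI and sessionId are assumed to come from server setup, the credentials are caller-supplied placeholders, and nothing here beyond the method signatures is taken from the diff.

```
package example // illustrative sketch only

import "github.com/fairdatasociety/fairOS-dfs/pkg/dfs"

// loginAndInspect logs a user in, reads the user stat for the session and logs
// out again, using the V2 signatures shown above.
func loginAndInspect(dfsAPI *dfs.API, userName, passPhrase, sessionId string) error {
	if _, _, _, err := dfsAPI.LoginUserV2(userName, passPhrase, sessionId); err != nil {
		return err
	}
	stat, err := dfsAPI.GetUserStat(sessionId)
	if err != nil {
		return err
	}
	_ = stat // information about the logged-in user
	return dfsAPI.LogoutUser(sessionId)
}
```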
-func (d *API) ExportUser(sessionId string) (string, string, error) { - // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) - if ui == nil { - return "", "", ErrUserNotLoggedIn - } - return d.users.ExportUser(ui) + return a.users.GetUserStat(ui) } +/* // MigrateUser is a controller function which migrates user credentials to swarm from local storage -func (d *API) MigrateUser(username, passPhrase, sessionId string) error { +func (a *API) MigrateUser(username, passPhrase, sessionId string) error { // get the logged in user information - ui := d.users.GetLoggedInUserInfo(sessionId) + ui := a.users.GetLoggedInUserInfo(sessionId) if ui == nil { return ErrUserNotLoggedIn } - return d.users.MigrateUser(ui.GetUserName(), username, d.dataDir, passPhrase, sessionId, d.client, ui) + return a.users.MigrateUser(ui.GetUserName(), username, a.dataDir, passPhrase, sessionId, a.client, ui) } +*/ diff --git a/pkg/dir/dir.go b/pkg/dir/dir.go index 06602cfa..b94746ae 100644 --- a/pkg/dir/dir.go +++ b/pkg/dir/dir.go @@ -17,8 +17,14 @@ limitations under the License. package dir import ( + "context" + "encoding/json" + "fmt" + "strconv" "sync" + "github.com/fairdatasociety/fairOS-dfs/pkg/taskmanager" + "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" "github.com/fairdatasociety/fairOS-dfs/pkg/feed" f "github.com/fairdatasociety/fairOS-dfs/pkg/file" @@ -36,10 +42,12 @@ type Directory struct { dirMap map[string]*Inode // path to dirInode cache dirMu *sync.RWMutex logger logging.Logger + syncManager taskmanager.TaskManagerGO } // NewDirectory the main directory object that handles all the directory related functions. -func NewDirectory(podName string, client blockstore.Client, fd *feed.API, user utils.Address, file f.IFile, logger logging.Logger) *Directory { +func NewDirectory(podName string, client blockstore.Client, fd *feed.API, user utils.Address, + file f.IFile, m taskmanager.TaskManagerGO, logger logging.Logger) *Directory { return &Directory{ podName: podName, client: client, @@ -49,6 +57,7 @@ func NewDirectory(podName string, client blockstore.Client, fd *feed.API, user u dirMap: make(map[string]*Inode), dirMu: &sync.RWMutex{}, logger: logger, + syncManager: m, } } @@ -88,3 +97,78 @@ func (d *Directory) RemoveAllFromDirectoryMap() { defer d.dirMu.Unlock() d.dirMap = make(map[string]*Inode) } + +type syncTask struct { + d *Directory + path string + podPassword string + wg *sync.WaitGroup +} + +func newSyncTask(d *Directory, path, podPassword string, wg *sync.WaitGroup) *syncTask { + return &syncTask{ + d: d, + path: path, + wg: wg, + podPassword: podPassword, + } +} + +func (st *syncTask) Execute(context.Context) error { + defer st.wg.Done() + return st.d.file.LoadFileMeta(st.path, st.podPassword) +} + +func (st *syncTask) Name() string { + return st.path +} + +type lsTask struct { + d *Directory + podPassword string + topic []byte + path string + entries *[]Entry + mtx sync.Locker + wg *sync.WaitGroup +} + +func newLsTask(d *Directory, topic []byte, path, podPassword string, l *[]Entry, mtx sync.Locker, wg *sync.WaitGroup) *lsTask { + return &lsTask{ + d: d, + podPassword: podPassword, + topic: topic, + path: path, + entries: l, + mtx: mtx, + wg: wg, + } +} + +func (lt *lsTask) Execute(context.Context) error { + defer lt.wg.Done() + _, data, err := lt.d.fd.GetFeedData(lt.topic, lt.d.getAddress(), []byte(lt.podPassword)) + if err != nil { // skipcq: TCV-001 + return fmt.Errorf("list dir : %v", err) + } + var dirInode *Inode + err = json.Unmarshal(data, &dirInode) + if 
err != nil { // skipcq: TCV-001 + return fmt.Errorf("list dir : %v", err) + } + entry := Entry{ + Name: dirInode.Meta.Name, + ContentType: MineTypeDirectory, // per RFC2425 + CreationTime: strconv.FormatInt(dirInode.Meta.CreationTime, 10), + AccessTime: strconv.FormatInt(dirInode.Meta.AccessTime, 10), + ModificationTime: strconv.FormatInt(dirInode.Meta.ModificationTime, 10), + } + lt.mtx.Lock() + defer lt.mtx.Unlock() + *lt.entries = append(*lt.entries, entry) + return nil +} + +func (lt *lsTask) Name() string { + return lt.path +} diff --git a/pkg/dir/dir_present.go b/pkg/dir/dir_present.go index 29e1f0a2..39ffc56f 100644 --- a/pkg/dir/dir_present.go +++ b/pkg/dir/dir_present.go @@ -21,9 +21,9 @@ import ( ) // IsDirectoryPresent this function check if a given directory is present inside the pod. -func (d *Directory) IsDirectoryPresent(directoryNameWithPath string) bool { +func (d *Directory) IsDirectoryPresent(directoryNameWithPath, podPassword string) bool { topic := utils.HashString(directoryNameWithPath) - _, metaBytes, err := d.fd.GetFeedData(topic, d.userAddress) + _, metaBytes, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) if string(metaBytes) == utils.DeletedFeedMagicWord { return false } diff --git a/pkg/dir/dir_present_test.go b/pkg/dir/dir_present_test.go index 3c7756c6..bdd414e9 100644 --- a/pkg/dir/dir_present_test.go +++ b/pkg/dir/dir_present_test.go @@ -19,6 +19,12 @@ package dir_test import ( "io" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" bm "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -32,45 +38,47 @@ func TestDirPresent(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() + tm := taskmanager.New(1, 10, time.Second*15, logger) t.Run("dir-present", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create a new dir - err := dirObject.MkDir("/baseDir") + err := dirObject.MkDir("/baseDir", podPassword) if err != nil { t.Fatal(err) } // check if dir is present - present := dirObject.IsDirectoryPresent("/baseDir") + present := dirObject.IsDirectoryPresent("/baseDir", podPassword) if !present { t.Fatalf("directory is not present") } - err = dirObject.RmDir("/baseDir") + err = dirObject.RmDir("/baseDir", podPassword) if err != nil { t.Fatal(err) } - present = dirObject.IsDirectoryPresent("/baseDir") + present = dirObject.IsDirectoryPresent("/baseDir", podPassword) if present { t.Fatalf("directory is present") } diff --git a/pkg/dir/dir_test.go b/pkg/dir/dir_test.go index fcb874da..c7812b9a 100644 --- a/pkg/dir/dir_test.go +++ 
b/pkg/dir/dir_test.go @@ -3,6 +3,12 @@ package dir_test import ( "io" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" bm "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -16,35 +22,37 @@ func TestDirRmAllFromMap(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() + tm := taskmanager.New(1, 10, time.Second*15, logger) t.Run("dir-rm-all-from-map", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create a new dir - err := dirObject.MkDir("/baseDir") + err := dirObject.MkDir("/baseDir", podPassword) if err != nil { t.Fatal(err) } // check if dir is present - present := dirObject.IsDirectoryPresent("/baseDir") + present := dirObject.IsDirectoryPresent("/baseDir", podPassword) if !present { t.Fatalf("directory is not present") } diff --git a/pkg/dir/inode.go b/pkg/dir/inode.go index 8977cfe3..c96876ae 100644 --- a/pkg/dir/inode.go +++ b/pkg/dir/inode.go @@ -24,24 +24,27 @@ import ( ) type Inode struct { - Meta *MetaData - FileOrDirNames []string + Meta *MetaData `json:"meta"` + FileOrDirNames []string `json:"fileOrDirNames"` } var ( ErrResourceDeleted = errors.New("resource was deleted") ) +// GetMeta returns iNode metadata // skipcq: TCV-001 func (in *Inode) GetMeta() *MetaData { return in.Meta } +// GetFileOrDirNames returns file and folder names in iNode // skipcq: TCV-001 func (in *Inode) GetFileOrDirNames() []string { return in.FileOrDirNames } +// SetFileOrDirNames sets file and folder names in iNode // skipcq: TCV-001 func (in *Inode) SetFileOrDirNames(fileOrDirNames []string) { in.FileOrDirNames = fileOrDirNames diff --git a/pkg/dir/ls.go b/pkg/dir/ls.go index 23ef5204..06ea86f2 100644 --- a/pkg/dir/ls.go +++ b/pkg/dir/ls.go @@ -17,10 +17,10 @@ limitations under the License. 
package dir import ( - "encoding/json" "fmt" - "strconv" + "path/filepath" "strings" + "sync" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) @@ -31,20 +31,21 @@ const ( type Entry struct { Name string `json:"name"` - ContentType string `json:"content_type"` + ContentType string `json:"contentType"` Size string `json:"size,omitempty"` - BlockSize string `json:"block_size,omitempty"` - CreationTime string `json:"creation_time"` - ModificationTime string `json:"modification_time"` - AccessTime string `json:"access_time"` + BlockSize string `json:"blockSize,omitempty"` + CreationTime string `json:"creationTime"` + ModificationTime string `json:"modificationTime"` + AccessTime string `json:"accessTime"` } // ListDir given a directory, this function lists all the children (directory) inside the given directory. // it also creates a list of files inside the directory and gives it back, so that the file listing // function can give information about those files. -func (d *Directory) ListDir(dirNameWithPath string) ([]Entry, []string, error) { - topic := utils.HashString(utils.CombinePathAndFile(dirNameWithPath, "")) - _, data, err := d.fd.GetFeedData(topic, d.getAddress()) +func (d *Directory) ListDir(dirNameWithPath, podPassword string) ([]Entry, []string, error) { + dirNameWithPath = filepath.ToSlash(dirNameWithPath) + topic := utils.HashString(dirNameWithPath) + _, data, err := d.fd.GetFeedData(topic, d.getAddress(), []byte(podPassword)) if err != nil { // skipcq: TCV-001 if dirNameWithPath == utils.PathSeparator { return nil, nil, nil @@ -52,42 +53,33 @@ func (d *Directory) ListDir(dirNameWithPath string) ([]Entry, []string, error) { return nil, nil, fmt.Errorf("list dir : %v", err) // skipcq: TCV-001 } - var dirInode Inode + dirInode := &Inode{} err = dirInode.Unmarshal(data) if err != nil { return nil, nil, fmt.Errorf("list dir : %v", err) } - var listEntries []Entry + wg := new(sync.WaitGroup) + mtx := &sync.Mutex{} + listEntries := &[]Entry{} var files []string for _, fileOrDirName := range dirInode.FileOrDirNames { if strings.HasPrefix(fileOrDirName, "_D_") { dirName := strings.TrimPrefix(fileOrDirName, "_D_") dirPath := utils.CombinePathAndFile(dirNameWithPath, dirName) dirTopic := utils.HashString(dirPath) - _, data, err := d.fd.GetFeedData(dirTopic, d.getAddress()) - if err != nil { // skipcq: TCV-001 + wg.Add(1) + lsTask := newLsTask(d, dirTopic, dirPath, podPassword, listEntries, mtx, wg) + _, err := d.syncManager.Go(lsTask) + if err != nil { return nil, nil, fmt.Errorf("list dir : %v", err) } - - var dirInode *Inode - err = json.Unmarshal(data, &dirInode) - if err != nil { // skipcq: TCV-001 - continue - } - entry := Entry{ - Name: dirInode.Meta.Name, - ContentType: MineTypeDirectory, // per RFC2425 - CreationTime: strconv.FormatInt(dirInode.Meta.CreationTime, 10), - AccessTime: strconv.FormatInt(dirInode.Meta.AccessTime, 10), - ModificationTime: strconv.FormatInt(dirInode.Meta.ModificationTime, 10), - } - listEntries = append(listEntries, entry) } else if strings.HasPrefix(fileOrDirName, "_F_") { fileName := strings.TrimPrefix(fileOrDirName, "_F_") filePath := utils.CombinePathAndFile(dirNameWithPath, fileName) files = append(files, filePath) } } - return listEntries, files, nil + wg.Wait() + return *listEntries, files, nil } diff --git a/pkg/dir/ls_test.go b/pkg/dir/ls_test.go index d90d9fef..99ed8df4 100644 --- a/pkg/dir/ls_test.go +++ b/pkg/dir/ls_test.go @@ -17,9 +17,16 @@ limitations under the License. 
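Stepping back from the diff for a moment: the rewritten ListDir above fans each child-directory lookup out as a task on the shared task manager and joins them with a WaitGroup, instead of fetching feeds sequentially. A self-contained sketch of that pattern under these assumptions: greetTask and its output are invented for the example; only the Execute/Name task shape and the taskmanager New/Go/Stop calls mirror what the diff and its tests use.

```
package main

import (
	"context"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/fairdatasociety/fairOS-dfs/pkg/logging"
	"github.com/plexsysio/taskmanager"
)

// greetTask is a made-up task; in ListDir the equivalent is lsTask, which fetches
// and unmarshals one child directory's feed entry into the shared entries slice.
type greetTask struct {
	name string
	wg   *sync.WaitGroup
}

func (t *greetTask) Execute(context.Context) error {
	defer t.wg.Done()
	fmt.Println("processed", t.name)
	return nil
}

func (t *greetTask) Name() string { return t.name }

func main() {
	logger := logging.New(io.Discard, 0)
	// same constructor arguments the new tests pass to taskmanager.New
	tm := taskmanager.New(1, 10, time.Second*15, logger)
	defer func() { _ = tm.Stop(context.Background()) }()

	wg := new(sync.WaitGroup)
	for _, child := range []string{"subDir1", "subDir2"} {
		wg.Add(1)
		if _, err := tm.Go(&greetTask{name: child, wg: wg}); err != nil {
			wg.Done()
			fmt.Println("submit failed:", err)
		}
	}
	wg.Wait() // ListDir does the same join before returning its entries
}
```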
package dir_test import ( + "context" "errors" "io" + "sort" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" @@ -35,95 +42,108 @@ func TestListDirectory(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() t.Run("list-dirr", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } - err := dirObject.MkDir("/") + err := dirObject.MkDir("/", podPassword) if !errors.Is(err, dir.ErrInvalidDirectoryName) { - t.Fatal("invalid dir name") + t.Fatal("invalid dir name", err) } longDirName, err := utils.GetRandString(101) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/" + longDirName) + err = dirObject.MkDir("/"+longDirName, podPassword) if !errors.Is(err, dir.ErrTooLongDirectoryName) { t.Fatal("dir name too long") } // create some dir and files - err = dirObject.MkDir("/parentDir") + err = dirObject.MkDir("/parentDir", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/parentDir") + err = dirObject.MkDir("/parentDir", podPassword) if !errors.Is(err, dir.ErrDirectoryAlreadyPresent) { t.Fatal("dir already present") } // populate the directory with few directory and files - err = dirObject.MkDir("/parentDir/subDir1") + err = dirObject.MkDir("/parentDir/subDir1", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/parentDir/subDir2") + err = dirObject.MkDir("/parentDir/subDir2", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.AddEntryToDir("", "file1", true) + err = dirObject.AddEntryToDir("", podPassword, "file1", true) if !errors.Is(err, dir.ErrInvalidDirectoryName) { t.Fatal("invalid dir name") } - err = dirObject.AddEntryToDir("/parentDir", "", true) + err = dirObject.AddEntryToDir("/parentDir", podPassword, "", true) if !errors.Is(err, dir.ErrInvalidFileOrDirectoryName) { t.Fatal("invalid file or dir name") } - err = dirObject.AddEntryToDir("/parentDir-not-available", "file1", true) + err = dirObject.AddEntryToDir("/parentDir-not-available", podPassword, "file1", true) if !errors.Is(err, dir.ErrDirectoryNotPresent) { t.Fatal("parent not available") } // just add dummy file enty as file listing is not tested here - err = dirObject.AddEntryToDir("/parentDir", "file1", true) + err = dirObject.AddEntryToDir("/parentDir", podPassword, "file1", true) if err != nil { t.Fatal(err) } - err = dirObject.AddEntryToDir("/parentDir", "file2", true) + err = dirObject.AddEntryToDir("/parentDir", podPassword, "file2", true) if err != nil { t.Fatal(err) } // validate dir listing - dirs, files, err := 
dirObject.ListDir("/parentDir") + dirEntries, files, err := dirObject.ListDir("/parentDir", podPassword) if err != nil { t.Fatal(err) } + dirs := []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + if len(dirs) != 2 { t.Fatalf("invalid directory entry count") } + sort.Strings(dirs) + sort.Strings(files) // validate entry names - if dirs[0].Name != "subDir1" { + if dirs[0] != "subDir1" { t.Fatalf("invalid directory name") } - if dirs[1].Name != "subDir2" { + if dirs[1] != "subDir2" { t.Fatalf("invalid directory name") } if files[0] != "/parentDir/file1" { @@ -135,32 +155,32 @@ func TestListDirectory(t *testing.T) { }) t.Run("list-dir-from-different-dir-object", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create dir - err = dirObject.MkDir("/parentDir") + err = dirObject.MkDir("/parentDir", podPassword) if err != nil { t.Fatal(err) } // populate the directory with few directory and files - err = dirObject.MkDir("/parentDir/subDir1") + err = dirObject.MkDir("/parentDir/subDir1", podPassword) if err != nil { t.Fatal(err) } - - dirObject2 := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) - err = dirObject2.AddRootDir("pod1", user, fd) + dirObject2 := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) + err = dirObject2.AddRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // validate dir listing - dirs, _, err := dirObject2.ListDir("/parentDir") + dirs, _, err := dirObject2.ListDir("/parentDir", podPassword) if err != nil { t.Fatal(err) } diff --git a/pkg/dir/meta.go b/pkg/dir/meta.go index ded47888..5425bb94 100644 --- a/pkg/dir/meta.go +++ b/pkg/dir/meta.go @@ -17,14 +17,14 @@ limitations under the License. 
package dir var ( - MetaVersion uint8 = 1 + MetaVersion uint8 = 2 ) type MetaData struct { - Version uint8 - Path string - Name string - CreationTime int64 - AccessTime int64 - ModificationTime int64 + Version uint8 `json:"version"` + Path string `json:"path"` + Name string `json:"name"` + CreationTime int64 `json:"creationTime"` + AccessTime int64 `json:"accessTime"` + ModificationTime int64 `json:"modificationTime"` } diff --git a/pkg/dir/mkdir.go b/pkg/dir/mkdir.go index 4bc240dd..739a8605 100644 --- a/pkg/dir/mkdir.go +++ b/pkg/dir/mkdir.go @@ -30,12 +30,12 @@ const ( nameLength = 100 ) -func (d *Directory) MkDir(dirToCreateWithPath string) error { - parentPath := filepath.Dir(dirToCreateWithPath) +func (d *Directory) MkDir(dirToCreateWithPath, podPassword string) error { + parentPath := filepath.ToSlash(filepath.Dir(dirToCreateWithPath)) dirName := filepath.Base(dirToCreateWithPath) // validation checks of the arguments - if dirName == "" || strings.HasPrefix(dirName, utils.PathSeparator) { + if dirName == "" || strings.HasPrefix(filepath.ToSlash(dirName), utils.PathSeparator) { return ErrInvalidDirectoryName } @@ -75,14 +75,14 @@ func (d *Directory) MkDir(dirToCreateWithPath string) error { } // upload the metadata as blob - previousAddr, _, err := d.fd.GetFeedData(topic, d.userAddress) + previousAddr, _, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) if err == nil && previousAddr != nil { - _, err = d.fd.UpdateFeed(topic, d.userAddress, data) + _, err = d.fd.UpdateFeed(topic, d.userAddress, data, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } } else { - _, err = d.fd.CreateFeed(topic, d.userAddress, data) + _, err = d.fd.CreateFeed(topic, d.userAddress, data, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } @@ -93,7 +93,7 @@ func (d *Directory) MkDir(dirToCreateWithPath string) error { // get the parent directory entry and add this new directory to its list of children parentHash := utils.HashString(utils.CombinePathAndFile(parentPath, "")) dirName = "_D_" + dirName - _, parentData, err := d.fd.GetFeedData(parentHash, d.userAddress) + _, parentData, err := d.fd.GetFeedData(parentHash, d.userAddress, []byte(podPassword)) if err != nil { return err } @@ -111,7 +111,8 @@ func (d *Directory) MkDir(dirToCreateWithPath string) error { if err != nil { // skipcq: TCV-001 return err } - _, err = d.fd.UpdateFeed(parentHash, d.userAddress, parentData) + + _, err = d.fd.UpdateFeed(parentHash, d.userAddress, parentData, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } @@ -119,7 +120,7 @@ func (d *Directory) MkDir(dirToCreateWithPath string) error { return nil } -func (d *Directory) MkRootDir(podName string, podAddress utils.Address, fd *feed.API) error { +func (d *Directory) MkRootDir(podName, podPassword string, podAddress utils.Address, fd *feed.API) error { // create the root parent dir now := time.Now().Unix() meta := MetaData{ @@ -140,14 +141,14 @@ func (d *Directory) MkRootDir(podName string, podAddress utils.Address, fd *feed } parentPath := utils.CombinePathAndFile(utils.PathSeparator, "") parentHash := utils.HashString(parentPath) - addr, data, err := d.fd.GetFeedData(parentHash, d.userAddress) + addr, data, err := d.fd.GetFeedData(parentHash, d.userAddress, []byte(podPassword)) if err == nil && addr != nil && data != nil { - _, err = fd.UpdateFeed(parentHash, podAddress, parentData) + _, err = fd.UpdateFeed(parentHash, podAddress, parentData, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return 
err } } else { - _, err = fd.CreateFeed(parentHash, podAddress, parentData) + _, err = fd.CreateFeed(parentHash, podAddress, parentData, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } @@ -156,10 +157,10 @@ func (d *Directory) MkRootDir(podName string, podAddress utils.Address, fd *feed return nil } -func (d *Directory) AddRootDir(podName string, podAddress utils.Address, fd *feed.API) error { +func (d *Directory) AddRootDir(podName, podPassword string, podAddress utils.Address, fd *feed.API) error { parentPath := utils.CombinePathAndFile(utils.PathSeparator, "") parentHash := utils.HashString(parentPath) - _, parentDataBytes, err := fd.GetFeedData(parentHash, podAddress) + _, parentDataBytes, err := fd.GetFeedData(parentHash, podAddress, []byte(podPassword)) if err != nil { return err } diff --git a/pkg/dir/mkdir_test.go b/pkg/dir/mkdir_test.go index 042e4331..0ae86853 100644 --- a/pkg/dir/mkdir_test.go +++ b/pkg/dir/mkdir_test.go @@ -17,8 +17,15 @@ limitations under the License. package dir_test import ( + "context" "io" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" bm "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -32,34 +39,40 @@ func TestMkdir(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() + t.Run("simple-mkdir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create a new dir - err := dirObject.MkDir("/baseDir") + err := dirObject.MkDir("/baseDir", podPassword) if err != nil { t.Fatal(err) } // validate dir - dirs, _, err := dirObject.ListDir("/") + dirs, _, err := dirObject.ListDir("/", podPassword) if err != nil { t.Fatal(err) } @@ -70,38 +83,40 @@ func TestMkdir(t *testing.T) { t.Fatalf("invalid directory name") } }) + t.Run("complicated-mkdir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // try to create a new dir without creating root - err := dirObject.MkDir("/baseDir/baseDir2/baseDir3/baseDir4") + err := dirObject.MkDir("/baseDir/baseDir2/baseDir3/baseDir4", podPassword) if err == nil || err != dir.ErrDirectoryNotPresent { t.Fatal(err) 
} - err = dirObject.MkDir("/baseDir") + err = dirObject.MkDir("/baseDir", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/baseDir/baseDir2") + err = dirObject.MkDir("/baseDir/baseDir2", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/baseDir/baseDir2/baseDir3") + err = dirObject.MkDir("/baseDir/baseDir2/baseDir3", podPassword) if err != nil { t.Fatal(err) } // validate dir - dirs, _, err := dirObject.ListDir("/baseDir") + dirs, _, err := dirObject.ListDir("/baseDir", podPassword) if err != nil { t.Fatal(err) } @@ -112,7 +127,7 @@ func TestMkdir(t *testing.T) { t.Fatalf("invalid directory name") } - dirs, _, err = dirObject.ListDir("/baseDir/baseDir2") + dirs, _, err = dirObject.ListDir("/baseDir/baseDir2", podPassword) if err != nil { t.Fatal(err) } diff --git a/pkg/dir/modify_dir_entry.go b/pkg/dir/modify_dir_entry.go index e21719a4..a1a590c3 100644 --- a/pkg/dir/modify_dir_entry.go +++ b/pkg/dir/modify_dir_entry.go @@ -27,7 +27,7 @@ import ( // AddEntryToDir adds a new entry (directory/file) to a given directory. // This is typically called when a new directory is created under the given directory or // a new file is uploaded under the given directory. -func (d *Directory) AddEntryToDir(parentDir, itemToAdd string, isFile bool) error { +func (d *Directory) AddEntryToDir(parentDir, podPassword, itemToAdd string, isFile bool) error { // validation checks of the arguments if parentDir == "" { return ErrInvalidDirectoryName @@ -44,7 +44,7 @@ func (d *Directory) AddEntryToDir(parentDir, itemToAdd string, isFile bool) erro // get the latest meta from swarm topic := utils.HashString(parentDir) - _, data, err := d.fd.GetFeedData(topic, d.userAddress) + _, data, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return fmt.Errorf("modify dir entry: %v", err) } @@ -69,7 +69,7 @@ func (d *Directory) AddEntryToDir(parentDir, itemToAdd string, isFile bool) erro if err != nil { // skipcq: TCV-001 return fmt.Errorf("modify dir entry : %v", err) } - _, err = d.fd.UpdateFeed(topic, d.userAddress, data) + _, err = d.fd.UpdateFeed(topic, d.userAddress, data, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return fmt.Errorf("modify dir entry : %v", err) } @@ -80,7 +80,7 @@ func (d *Directory) AddEntryToDir(parentDir, itemToAdd string, isFile bool) erro // RemoveEntryFromDir removes a entry (directory/file) under the given directory. // This is typically called when a directory is deleted under the given directory or // a file is removed under the given directory. 
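Nearly every directory operation in this change now threads the pod password into the feed calls. MkDir and MkRootDir above share the same idiom: read the feed with GetFeedData, UpdateFeed if an address already exists, otherwise CreateFeed, always passing []byte(podPassword) as the per-pod secret. A hypothetical helper, written as if it lived in package dir, just to show that idiom in one place; it is not part of the diff.

```
package dir // hypothetical addition for illustration only

// putFeed writes directory metadata at topic, creating the feed on the first
// write and updating it afterwards, with the pod password forwarded on every
// feed call as in MkDir and MkRootDir above.
func (d *Directory) putFeed(topic []byte, data []byte, podPassword string) error {
	previousAddr, _, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword))
	if err == nil && previousAddr != nil {
		_, err = d.fd.UpdateFeed(topic, d.userAddress, data, []byte(podPassword))
		return err
	}
	_, err = d.fd.CreateFeed(topic, d.userAddress, data, []byte(podPassword))
	return err
}
```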
-func (d *Directory) RemoveEntryFromDir(parentDir, itemToDelete string, isFile bool) error { +func (d *Directory) RemoveEntryFromDir(parentDir, podPassword, itemToDelete string, isFile bool) error { // validation checks of the arguments if parentDir == "" { // skipcq: TCV-001 return ErrInvalidDirectoryName @@ -91,7 +91,7 @@ func (d *Directory) RemoveEntryFromDir(parentDir, itemToDelete string, isFile bo } parentHash := utils.HashString(parentDir) - _, parentData, err := d.fd.GetFeedData(parentHash, d.userAddress) + _, parentData, err := d.fd.GetFeedData(parentHash, d.userAddress, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } @@ -121,7 +121,7 @@ func (d *Directory) RemoveEntryFromDir(parentDir, itemToDelete string, isFile bo if err != nil { // skipcq: TCV-001 return err } - _, err = d.fd.UpdateFeed(parentHash, d.userAddress, parentData) + _, err = d.fd.UpdateFeed(parentHash, d.userAddress, parentData, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } diff --git a/pkg/dir/rename.go b/pkg/dir/rename.go new file mode 100644 index 00000000..2e9c69ab --- /dev/null +++ b/pkg/dir/rename.go @@ -0,0 +1,223 @@ +package dir + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/file" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" +) + +func (d *Directory) RenameDir(dirNameWithPath, newDirNameWithPath, podPassword string) error { + dirNameWithPath = filepath.ToSlash(dirNameWithPath) + newDirNameWithPath = filepath.ToSlash(newDirNameWithPath) + parentPath := filepath.ToSlash(filepath.Dir(dirNameWithPath)) + dirName := filepath.Base(dirNameWithPath) + + newParentPath := filepath.ToSlash(filepath.Dir(newDirNameWithPath)) + newDirName := filepath.Base(newDirNameWithPath) + + // validation checks of the arguments + if dirName == "" || strings.HasPrefix(dirName, utils.PathSeparator) { // skipcq: TCV-001 + return ErrInvalidDirectoryName + } + + if len(dirName) > nameLength { // skipcq: TCV-001 + return ErrTooLongDirectoryName + } + + if dirName == "/" { + return fmt.Errorf("cannot rename root dir") + } + + // check if directory exists + if d.GetDirFromDirectoryMap(dirNameWithPath) == nil { // skipcq: TCV-001 + return ErrDirectoryNotPresent + } + + // check if parent directory exists + if d.GetDirFromDirectoryMap(parentPath) == nil { // skipcq: TCV-001 + return ErrDirectoryNotPresent + } + if d.GetDirFromDirectoryMap(newDirNameWithPath) != nil { + return ErrDirectoryAlreadyPresent + } + + err := d.mapChildrenToNewPath(dirNameWithPath, newDirNameWithPath, podPassword) + if err != nil { // skipcq: TCV-001 + return err + } + + topic := utils.HashString(dirNameWithPath) + newTopic := utils.HashString(newDirNameWithPath) + _, inodeData, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) + if err != nil { + return err + } + + // unmarshall the data and rename the directory entry + var inode *Inode + err = json.Unmarshal(inodeData, &inode) + if err != nil { // skipcq: TCV-001 + return err + } + + inode.Meta.Name = newDirName + inode.Meta.Path = newParentPath + inode.Meta.ModificationTime = time.Now().Unix() + + // upload meta + fileMetaBytes, err := json.Marshal(inode) + if err != nil { // skipcq: TCV-001 + return err + } + + previousAddr, _, err := d.fd.GetFeedData(newTopic, d.userAddress, []byte(podPassword)) + if err == nil && previousAddr != nil { + _, err = d.fd.UpdateFeed(newTopic, d.userAddress, fileMetaBytes, []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return 
err + } + } else { + _, err = d.fd.CreateFeed(newTopic, d.userAddress, fileMetaBytes, []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + } + + // delete old meta + // update with utils.DeletedFeedMagicWord + _, err = d.fd.UpdateFeed(topic, d.userAddress, []byte(utils.DeletedFeedMagicWord), []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + d.RemoveFromDirectoryMap(dirNameWithPath) + + // get the parent directory entry and add this new directory to its list of children + err = d.RemoveEntryFromDir(parentPath, podPassword, dirName, false) + if err != nil { + return err + } + err = d.AddEntryToDir(newParentPath, podPassword, newDirName, false) + if err != nil { + return err + } + + err = d.SyncDirectory(parentPath, podPassword) + if err != nil { + return err + } + + if parentPath != newParentPath { + err = d.SyncDirectory(newParentPath, podPassword) + if err != nil { + return err + } + } + return nil +} + +func (d *Directory) mapChildrenToNewPath(totalPath, newTotalPath, podPassword string) error { + dirInode := d.GetDirFromDirectoryMap(totalPath) + for _, fileOrDirName := range dirInode.FileOrDirNames { + if strings.HasPrefix(fileOrDirName, "_F_") { + fileName := strings.TrimPrefix(fileOrDirName, "_F_") + filePath := utils.CombinePathAndFile(totalPath, fileName) + newFilePath := utils.CombinePathAndFile(newTotalPath, fileName) + topic := utils.HashString(filePath) + _, metaBytes, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) + if err != nil { + return err + } + if string(metaBytes) == utils.DeletedFeedMagicWord { + continue + } + + p := &file.MetaData{} + err = json.Unmarshal(metaBytes, p) + if err != nil { // skipcq: TCV-001 + return err + } + newTopic := utils.HashString(newFilePath) + // change previous meta.Name + p.Path = newTotalPath + p.ModificationTime = time.Now().Unix() + // upload meta + fileMetaBytes, err := json.Marshal(p) + if err != nil { // skipcq: TCV-001 + return err + } + + previousAddr, _, err := d.fd.GetFeedData(newTopic, d.userAddress, []byte(podPassword)) + if err == nil && previousAddr != nil { + _, err = d.fd.UpdateFeed(newTopic, d.userAddress, fileMetaBytes, []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + } else { + _, err = d.fd.CreateFeed(newTopic, d.userAddress, fileMetaBytes, []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + } + + // delete old meta + // update with utils.DeletedFeedMagicWord + _, err = d.fd.UpdateFeed(topic, d.userAddress, []byte(utils.DeletedFeedMagicWord), []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + } else if strings.HasPrefix(fileOrDirName, "_D_") { + dirName := strings.TrimPrefix(fileOrDirName, "_D_") + pathWithDir := utils.CombinePathAndFile(totalPath, dirName) + newPathWithDir := utils.CombinePathAndFile(newTotalPath, dirName) + err := d.mapChildrenToNewPath(pathWithDir, newPathWithDir, podPassword) + if err != nil { // skipcq: TCV-001 + return err + } + topic := utils.HashString(pathWithDir) + newTopic := utils.HashString(newPathWithDir) + _, inodeData, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) + if err != nil { + return err + } + // unmarshall the data and add the directory entry to the parent + var inode *Inode + err = json.Unmarshal(inodeData, &inode) + if err != nil { // skipcq: TCV-001 + return err + } + inode.Meta.Path = newTotalPath + inode.Meta.ModificationTime = time.Now().Unix() + // upload meta + fileMetaBytes, err := json.Marshal(inode) + if 
err != nil { // skipcq: TCV-001 + return err + } + previousAddr, _, err := d.fd.GetFeedData(newTopic, d.userAddress, []byte(podPassword)) + if err == nil && previousAddr != nil { + _, err = d.fd.UpdateFeed(newTopic, d.userAddress, fileMetaBytes, []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + } else { + _, err = d.fd.CreateFeed(newTopic, d.userAddress, fileMetaBytes, []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + } + + // delete old meta + // update with utils.DeletedFeedMagicWord + _, err = d.fd.UpdateFeed(topic, d.userAddress, []byte(utils.DeletedFeedMagicWord), []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + } + } + return nil +} diff --git a/pkg/dir/rename_test.go b/pkg/dir/rename_test.go new file mode 100644 index 00000000..44559a81 --- /dev/null +++ b/pkg/dir/rename_test.go @@ -0,0 +1,432 @@ +package dir_test + +import ( + "bytes" + "context" + "errors" + "io" + "sort" + "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + + "github.com/fairdatasociety/fairOS-dfs/pkg/file" + + "github.com/fairdatasociety/fairOS-dfs/pkg/account" + bm "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" + "github.com/fairdatasociety/fairOS-dfs/pkg/dir" + "github.com/fairdatasociety/fairOS-dfs/pkg/feed" + "github.com/fairdatasociety/fairOS-dfs/pkg/logging" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/plexsysio/taskmanager" +) + +func TestRenameDirectory(t *testing.T) { + mockClient := bm.NewMockBeeClient() + logger := logging.New(io.Discard, 0) + acc := account.New(logger) + _, _, err := acc.CreateUserAccount("") + if err != nil { + t.Fatal(err) + } + pod1AccountInfo, err := acc.CreatePodAccount(1, false) + if err != nil { + t.Fatal(err) + } + fd := feed.New(pod1AccountInfo, mockClient, logger) + user := acc.GetAddress(1) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + + t.Run("rename-dir-same-prnt", func(t *testing.T) { + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, fileObject, tm, logger) + // make root dir so that other directories can be added + err = dirObject.MkRootDir("pod1", podPassword, user, fd) + if err != nil { + t.Fatal(err) + } + err := dirObject.MkDir("/", podPassword) + if !errors.Is(err, dir.ErrInvalidDirectoryName) { + t.Fatal("invalid dir name") + } + longDirName, err := utils.GetRandString(101) + if err != nil { + t.Fatal(err) + } + err = dirObject.MkDir("/"+longDirName, podPassword) + if !errors.Is(err, dir.ErrTooLongDirectoryName) { + t.Fatal("dir name too long") + } + + // create some dir and files + err = dirObject.MkDir("/parentDir", podPassword) + if err != nil { + t.Fatal(err) + } + err = dirObject.MkDir("/parentDir", podPassword) + if !errors.Is(err, dir.ErrDirectoryAlreadyPresent) { + t.Fatal("dir already present") + } + // populate the directory with few directory and files + err = dirObject.MkDir("/parentDir/subDir1", podPassword) + if err != nil { + t.Fatal(err) + } + err = dirObject.MkDir("/parentDir/subDir2", podPassword) + if err != nil { + t.Fatal(err) + } + + r := new(bytes.Buffer) + err = fileObject.Upload(r, "file1", 0, 100, "/parentDir", "", podPassword) + if err != nil { + t.Fatal(err) + } + err = fileObject.Upload(r, "file2", 0, 100, "/parentDir", "", podPassword) + if err != nil { + t.Fatal(err) + } + err = 
fileObject.Upload(r, "file2", 0, 100, "/parentDir/subDir2", "", podPassword) + if err != nil { + t.Fatal(err) + } + // just add dummy file enty as file listing is not tested here + err = dirObject.AddEntryToDir("/parentDir", podPassword, "file1", true) + if err != nil { + t.Fatal(err) + } + err = dirObject.AddEntryToDir("/parentDir", podPassword, "file2", true) + if err != nil { + t.Fatal(err) + } + err = dirObject.AddEntryToDir("/parentDir/subDir2", podPassword, "file2", true) + if err != nil { + t.Fatal(err) + } + // rename + err = dirObject.RenameDir("/parentDir", "/parentNew", podPassword) + if err != nil { + t.Fatal(err) + } + dirEntries, _, err := dirObject.ListDir("/", podPassword) + if err != nil { + t.Fatal(err) + } + if dirEntries[0].Name != "parentNew" { + t.Fatal("rename failed for parentDir") + } + + err = dirObject.MkDir("/parent", podPassword) + if err != nil { + t.Fatal(err) + } + err = dirObject.RenameDir("/parentNew", "/parent", podPassword) + if !errors.Is(err, dir.ErrDirectoryAlreadyPresent) { + t.Fatal("directory name should already be present") + } + + // validate dir listing + dirEntries, files, err := dirObject.ListDir("/parentNew", podPassword) + if err != nil { + t.Fatal(err) + } + dirs := []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + + if len(dirs) != 2 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 2 { + t.Fatalf("invalid files entry count") + } + + sort.Strings(dirs) + sort.Strings(files) + // validate entry names + if dirs[0] != "subDir1" { + t.Fatalf("invalid directory name") + } + if dirs[1] != "subDir2" { + t.Fatalf("invalid directory name") + } + if files[0] != "/parentNew/file1" { + t.Fatalf("invalid file name") + } + if files[1] != "/parentNew/file2" { + t.Fatalf("invalid file name") + } + + _, files, err = dirObject.ListDir("/parentNew/subDir2", podPassword) + if err != nil { + t.Fatal(err) + } + if len(files) != 1 { + t.Fatal("file count mismatch /parentNew/subDir2") + } + if files[0] != "/parentNew/subDir2/file2" { + t.Fatal("file name mismatch /parentNew/subDir2") + } + + _, n, err := fileObject.Download("/parentNew/subDir2/file2", podPassword) + if err != nil { + t.Fatal(err) + } + if n != 0 { + t.Fatal("file size mismatch") + } + }) + + t.Run("rename-dir-diff-prnt", func(t *testing.T) { + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, fileObject, tm, logger) + // make root dir so that other directories can be added + err = dirObject.MkRootDir("pod1", podPassword, user, fd) + if err != nil { + t.Fatal(err) + } + err := dirObject.MkDir("/", podPassword) + if !errors.Is(err, dir.ErrInvalidDirectoryName) { + t.Fatal("invalid dir name") + } + longDirName, err := utils.GetRandString(101) + if err != nil { + t.Fatal(err) + } + err = dirObject.MkDir("/"+longDirName, podPassword) + if !errors.Is(err, dir.ErrTooLongDirectoryName) { + t.Fatal("dir name too long") + } + + // create some dir and files + err = dirObject.MkDir("/parentDir", podPassword) + if err != nil { + t.Fatal(err) + } + + // populate the directory with few directory and files + err = dirObject.MkDir("/parentDir/subDir1", podPassword) + if err != nil { + t.Fatal(err) + } + err = dirObject.MkDir("/parentDir/subDir1/subDir11", podPassword) + if err != nil { + t.Fatal(err) + } + err = dirObject.MkDir("/parentDir/subDir1/subDir11/sub111", podPassword) + if err != nil { + t.Fatal(err) + } + err = 
dirObject.MkDir("/parentDir/subDir2", podPassword) + if err != nil { + t.Fatal(err) + } + + r := new(bytes.Buffer) + err = fileObject.Upload(r, "file1", 0, 100, "/parentDir/subDir1/subDir11/sub111", "", podPassword) + if err != nil { + t.Fatal(err) + } + + // just add dummy file enty as file listing is not tested here + err = dirObject.AddEntryToDir("/parentDir/subDir1/subDir11/sub111", podPassword, "file1", true) + if err != nil { + t.Fatal(err) + } + + // rename + err = dirObject.RenameDir("/parentDir/subDir1/subDir11/sub111", "/parentDir/subDir2/sub111", podPassword) + if err != nil { + t.Fatal(err) + } + + _, _, err = dirObject.ListDir("/parentDir/subDir1/subDir11/sub111", podPassword) + if err == nil { + t.Fatal("should fail") + } + + dirEntries, files, err := dirObject.ListDir("/parentDir", podPassword) + if err != nil { + t.Fatal(err) + } + dirs := []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + + if len(dirs) != 2 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 0 { + t.Fatalf("invalid files entry count") + } + + sort.Strings(dirs) + sort.Strings(files) + + if dirs[0] != "subDir1" && dirs[1] != "subDir2" { + t.Fatal("wrong list of directories") + } + + dirEntries, files, err = dirObject.ListDir("/parentDir/subDir1", podPassword) + if err != nil { + t.Fatal(err) + } + + dirs = []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + + if len(dirs) != 1 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 0 { + t.Fatalf("invalid files entry count") + } + + if dirs[0] != "subDir11" { + t.Fatal("wrong list of directories") + } + + dirEntries, files, err = dirObject.ListDir("/parentDir/subDir2", podPassword) + if err != nil { + t.Fatal(err) + } + dirs = []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + + if len(dirs) != 1 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 0 { + t.Fatalf("invalid files entry count") + } + + if dirs[0] != "sub111" { + t.Fatal("wrong list of directories") + } + + dirEntries, files, err = dirObject.ListDir("/parentDir/subDir2/sub111", podPassword) + if err != nil { + t.Fatal(err) + } + + if len(dirEntries) != 0 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 1 { + t.Fatalf("invalid files entry count") + } + + if files[0] != "/parentDir/subDir2/sub111/file1" { + t.Fatal("wrong list of files") + } + + err = dirObject.RenameDir("/parentDir/subDir2/sub111", "/parentDir/sub111", podPassword) + if err != nil { + t.Fatal(err) + } + + dirEntries, files, err = dirObject.ListDir("/parentDir", podPassword) + if err != nil { + t.Fatal(err) + } + dirs = []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + + if len(dirs) != 3 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 0 { + t.Fatalf("invalid files entry count") + } + + sort.Strings(dirs) + sort.Strings(files) + if dirs[0] != "sub111" && dirs[1] != "subDir1" && dirs[2] != "subDir2" { + t.Fatal("wrong list of directories") + } + + // validate dir listing + dirEntries, files, err = dirObject.ListDir("/parentDir/subDir1", podPassword) + if err != nil { + t.Fatal(err) + } + + dirs = []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + + if len(dirs) != 1 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 0 { + t.Fatalf("invalid files entry count") + } + + if dirs[0] != "subDir11" { + t.Fatal("wrong list of directories") + } + + dirEntries, files, err = 
dirObject.ListDir("/parentDir/subDir2", podPassword) + if err != nil { + t.Fatal(err) + } + + if len(dirEntries) != 0 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 0 { + t.Fatalf("invalid files entry count") + } + + _, _, err = dirObject.ListDir("/parentDir/subDir2/sub111", podPassword) + if err == nil { + t.Fatal("should be err") + } + + dirEntries, files, err = dirObject.ListDir("/parentDir/sub111", podPassword) + if err != nil { + t.Fatal(err) + } + dirs = []string{} + + for _, v := range dirEntries { + dirs = append(dirs, v.Name) + } + + if len(dirs) != 0 { + t.Fatalf("invalid directory entry count") + } + if len(files) != 1 { + t.Fatalf("invalid files entry count") + } + + if files[0] != "/parentDir/sub111/file1" { + t.Fatal("wrong list of files") + } + err = dirObject.RenameDir("/parentDir/sub111", "/parentDir/subDir2/sub111", podPassword) + if err != nil { + t.Fatal(err) + } + }) +} diff --git a/pkg/dir/rmdir.go b/pkg/dir/rmdir.go index dfe94a33..39365774 100644 --- a/pkg/dir/rmdir.go +++ b/pkg/dir/rmdir.go @@ -24,11 +24,12 @@ import ( ) // RmDir removes a given directory and all the entries (file/directory) under that. -func (d *Directory) RmDir(directoryNameWithPath string) error { +func (d *Directory) RmDir(directoryNameWithPath, podPassword string) error { if directoryNameWithPath == "" { return ErrInvalidDirectoryName } - parentPath := filepath.Dir(directoryNameWithPath) + directoryNameWithPath = filepath.ToSlash(directoryNameWithPath) + parentPath := filepath.ToSlash(filepath.Dir(directoryNameWithPath)) dirToDelete := filepath.Base(directoryNameWithPath) // validation checks of the arguments if parentPath == "." { // skipcq: TCV-001 @@ -40,16 +41,14 @@ func (d *Directory) RmDir(directoryNameWithPath string) error { // check if directory present var totalPath string - if parentPath == "/" && dirToDelete == "/" { + if parentPath == utils.PathSeparator && filepath.ToSlash(dirToDelete) == utils.PathSeparator { totalPath = utils.CombinePathAndFile(parentPath, "") } else { totalPath = utils.CombinePathAndFile(parentPath, dirToDelete) - } if d.GetDirFromDirectoryMap(totalPath) == nil { return ErrDirectoryNotPresent } - // recursive delete dirInode := d.GetDirFromDirectoryMap(totalPath) if dirInode.FileOrDirNames != nil && len(dirInode.FileOrDirNames) > 0 { @@ -57,11 +56,11 @@ func (d *Directory) RmDir(directoryNameWithPath string) error { if strings.HasPrefix(fileOrDirName, "_F_") { fileName := strings.TrimPrefix(fileOrDirName, "_F_") filePath := utils.CombinePathAndFile(directoryNameWithPath, fileName) - err := d.file.RmFile(filePath) + err := d.file.RmFile(filePath, podPassword) if err != nil { // skipcq: TCV-001 return err } - err = d.RemoveEntryFromDir(directoryNameWithPath, fileName, true) + err = d.RemoveEntryFromDir(directoryNameWithPath, podPassword, fileName, true) if err != nil { // skipcq: TCV-001 return err } @@ -70,7 +69,7 @@ func (d *Directory) RmDir(directoryNameWithPath string) error { path := utils.CombinePathAndFile(directoryNameWithPath, dirName) d.logger.Infof(directoryNameWithPath) - err := d.RmDir(path) + err := d.RmDir(path, podPassword) if err != nil { // skipcq: TCV-001 return err } @@ -80,23 +79,23 @@ func (d *Directory) RmDir(directoryNameWithPath string) error { // remove the feed and clear the data structure topic := utils.HashString(totalPath) - _, err := d.fd.UpdateFeed(topic, d.userAddress, []byte(utils.DeletedFeedMagicWord)) + _, err := d.fd.UpdateFeed(topic, d.userAddress, []byte(utils.DeletedFeedMagicWord), 
[]byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } d.RemoveFromDirectoryMap(totalPath) - // return if root directory - if parentPath == "/" && dirToDelete == "/" { + if parentPath == utils.PathSeparator && filepath.ToSlash(dirToDelete) == utils.PathSeparator { return nil } // remove the directory entry from the parent dir - return d.RemoveEntryFromDir(parentPath, dirToDelete, false) + + return d.RemoveEntryFromDir(parentPath, podPassword, dirToDelete, false) } // RmRootDir removes root directory and all the entries (file/directory) under that. -func (d *Directory) RmRootDir() error { - dirToDelete := filepath.Base("/") +func (d *Directory) RmRootDir(podPassword string) error { + dirToDelete := utils.PathSeparator // check if directory present var totalPath = utils.CombinePathAndFile(dirToDelete, "") @@ -112,11 +111,11 @@ func (d *Directory) RmRootDir() error { if strings.HasPrefix(fileOrDirName, "_F_") { fileName := strings.TrimPrefix(fileOrDirName, "_F_") filePath := utils.CombinePathAndFile(dirToDelete, fileName) - err := d.file.RmFile(filePath) + err := d.file.RmFile(filePath, podPassword) if err != nil { // skipcq: TCV-001 return err } - err = d.RemoveEntryFromDir(dirToDelete, fileName, true) + err = d.RemoveEntryFromDir(dirToDelete, podPassword, fileName, true) if err != nil { // skipcq: TCV-001 return err } @@ -125,7 +124,7 @@ func (d *Directory) RmRootDir() error { path := utils.CombinePathAndFile(dirToDelete, dirName) d.logger.Infof(dirToDelete) - err := d.RmDir(path) + err := d.RmDir(path, podPassword) if err != nil { // skipcq: TCV-001 return err } @@ -135,7 +134,7 @@ func (d *Directory) RmRootDir() error { // remove the feed and clear the data structure topic := utils.HashString(totalPath) - _, err := d.fd.UpdateFeed(topic, d.userAddress, []byte(utils.DeletedFeedMagicWord)) + _, err := d.fd.UpdateFeed(topic, d.userAddress, []byte(utils.DeletedFeedMagicWord), []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } diff --git a/pkg/dir/rmdir_test.go b/pkg/dir/rmdir_test.go index 5aa8e7cb..54c67bcb 100644 --- a/pkg/dir/rmdir_test.go +++ b/pkg/dir/rmdir_test.go @@ -17,10 +17,17 @@ limitations under the License. 
package dir_test import ( + "context" "errors" "io" "strings" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" bm "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -34,101 +41,107 @@ func TestRmdir(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() t.Run("simple-rmdir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create a new dir - err := dirObject.MkDir("/dirToRemove") + err := dirObject.MkDir("/dirToRemove", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.RmDir("") + err = dirObject.RmDir("", podPassword) if !errors.Is(err, dir.ErrInvalidDirectoryName) { t.Fatal("invalid dir name") } - err = dirObject.RmDir("asdasd") + err = dirObject.RmDir("asdasd", podPassword) if !errors.Is(err, dir.ErrInvalidDirectoryName) { t.Fatal("invalid dir name") } - err = dirObject.RmDir("/asdasd") + err = dirObject.RmDir("/asdasd", podPassword) if !errors.Is(err, dir.ErrDirectoryNotPresent) { t.Fatal("dir not present") } // now delete the directory - err = dirObject.RmDir("/dirToRemove") + err = dirObject.RmDir("/dirToRemove", podPassword) if err != nil { t.Fatal(err) } // verify if the directory is actually removed - dirEntry, _, err := dirObject.ListDir("/") + dirEntry, _, err := dirObject.ListDir("/", podPassword) if err != nil { t.Fatal(err) } - if dirEntry != nil { + if len(dirEntry) != 0 { t.Fatalf("could not delete directory") } - err = dirObject.RmDir("/") + err = dirObject.RmDir("/", podPassword) if err != nil { t.Fatal(err) } }) t.Run("nested-rmdir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create a new dir - err := dirObject.MkDir("/dirToRemove1") + err := dirObject.MkDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } // create a new dir - err = dirObject.MkDir("/dirToRemove1/dirToRemove2") + err = dirObject.MkDir("/dirToRemove1/dirToRemove2", podPassword) if err != nil { t.Fatal(err) } // create a new dir - err = dirObject.MkDir("/dirToRemove1/dirToRemove2/dirToRemove") + err = dirObject.MkDir("/dirToRemove1/dirToRemove2/dirToRemove", podPassword) 
if err != nil { t.Fatal(err) } // make sure directories were created - dirEntry, _, err := dirObject.ListDir("/dirToRemove1") + dirEntry, _, err := dirObject.ListDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } if dirEntry == nil { t.Fatal("nested directory \"/dirToRemove1/dirToRemove2\" was not created") } - dirEntry, _, err = dirObject.ListDir("/dirToRemove1/dirToRemove2") + dirEntry, _, err = dirObject.ListDir("/dirToRemove1/dirToRemove2", podPassword) if err != nil { t.Fatal(err) } @@ -137,17 +150,17 @@ func TestRmdir(t *testing.T) { } // now delete the directory - err = dirObject.RmDir("/dirToRemove1") + err = dirObject.RmDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } // verify if the directory is actually removed - dirEntry, _, err = dirObject.ListDir("/") + dirEntry, _, err = dirObject.ListDir("/", podPassword) if err != nil { t.Fatal(err) } - if dirEntry != nil { + if len(dirEntry) != 0 { t.Fatalf("could not delete directory") } }) @@ -157,52 +170,56 @@ func TestRmRootDirByPath(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() t.Run("rmrootdir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create a new dir - err := dirObject.MkDir("/dirToRemove1") + err := dirObject.MkDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } // create a new dir - err = dirObject.MkDir("/dirToRemove1/dirToRemove2") + err = dirObject.MkDir("/dirToRemove1/dirToRemove2", podPassword) if err != nil { t.Fatal(err) } // create a new dir - err = dirObject.MkDir("/dirToRemove1/dirToRemove2/dirToRemove") + err = dirObject.MkDir("/dirToRemove1/dirToRemove2/dirToRemove", podPassword) if err != nil { t.Fatal(err) } // make sure directories were created - dirEntry, _, err := dirObject.ListDir("/dirToRemove1") + dirEntry, _, err := dirObject.ListDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } if dirEntry == nil { t.Fatal("nested directory \"/dirToRemove1/dirToRemove2\" was not created") } - dirEntry, _, err = dirObject.ListDir("/dirToRemove1/dirToRemove2") + dirEntry, _, err = dirObject.ListDir("/dirToRemove1/dirToRemove2", podPassword) if err != nil { t.Fatal(err) } @@ -211,11 +228,11 @@ func TestRmRootDirByPath(t *testing.T) { } fileName := "file1" - err = dirObject.AddEntryToDir("/dirToRemove1", fileName, true) + err = dirObject.AddEntryToDir("/dirToRemove1", podPassword, fileName, true) if err != nil { t.Fatal(err) } - _, fileEntry, err := dirObject.ListDir("/dirToRemove1") + _, fileEntry, err := dirObject.ListDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } @@ -223,13 +240,13 @@ func 
TestRmRootDirByPath(t *testing.T) { t.Fatal("there should a file entry") } // now delete the root directory - err = dirObject.RmDir("/") + err = dirObject.RmDir("/", podPassword) if err != nil { t.Fatal(err) } // verify if the directory is actually removed - dirEntry, _, err = dirObject.ListDir("/") + dirEntry, _, err = dirObject.ListDir("/", podPassword) if err != nil && !strings.HasSuffix(err.Error(), dir.ErrResourceDeleted.Error()) { t.Fatal("root directory was not deleted") } @@ -243,39 +260,44 @@ func TestRmRootDir(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() t.Run("rmrootdir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // create a new dir - err := dirObject.MkDir("/dirToRemove1") + err := dirObject.MkDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } // create a new dir - err = dirObject.MkDir("/dirToRemove1/dirToRemove2") + err = dirObject.MkDir("/dirToRemove1/dirToRemove2", podPassword) if err != nil { t.Fatal(err) } // create a new dir - err = dirObject.MkDir("/dirToRemove1/dirToRemove2/dirToRemove") + err = dirObject.MkDir("/dirToRemove1/dirToRemove2/dirToRemove", podPassword) if err != nil { t.Fatal(err) } @@ -285,14 +307,14 @@ func TestRmRootDir(t *testing.T) { } // make sure directories were created - dirEntry, _, err := dirObject.ListDir("/dirToRemove1") + dirEntry, _, err := dirObject.ListDir("/dirToRemove1", podPassword) if err != nil { t.Fatal(err) } if dirEntry == nil { t.Fatal("nested directory \"/dirToRemove1/dirToRemove2\" was not created") } - dirEntry, _, err = dirObject.ListDir("/dirToRemove1/dirToRemove2") + dirEntry, _, err = dirObject.ListDir("/dirToRemove1/dirToRemove2", podPassword) if err != nil { t.Fatal(err) } @@ -301,11 +323,11 @@ func TestRmRootDir(t *testing.T) { } fileName := "file1" - err = dirObject.AddEntryToDir("/", fileName, true) + err = dirObject.AddEntryToDir("/", podPassword, fileName, true) if err != nil { t.Fatal(err) } - _, fileEntry, err := dirObject.ListDir("/") + _, fileEntry, err := dirObject.ListDir("/", podPassword) if err != nil { t.Fatal(err) } @@ -314,13 +336,13 @@ func TestRmRootDir(t *testing.T) { } // now delete the root directory - err = dirObject.RmRootDir() + err = dirObject.RmRootDir(podPassword) if err != nil { t.Fatal(err) } // verify if the directory is actually removed - dirEntry, _, err = dirObject.ListDir("/") + dirEntry, _, err = dirObject.ListDir("/", podPassword) if err != nil && !strings.HasSuffix(err.Error(), dir.ErrResourceDeleted.Error()) { t.Fatal("root directory was not deleted") } diff --git a/pkg/dir/stat.go b/pkg/dir/stat.go index 
b7b6dda2..fdb2db16 100644 --- a/pkg/dir/stat.go +++ b/pkg/dir/stat.go @@ -27,20 +27,20 @@ import ( // Stats represents a given directory type Stats struct { - PodName string `json:"pod_name"` - DirPath string `json:"dir_path"` - DirName string `json:"dir_name"` - CreationTime string `json:"creation_time"` - ModificationTime string `json:"modification_time"` - AccessTime string `json:"access_time"` - NoOfDirectories string `json:"no_of_directories"` - NoOfFiles string `json:"no_of_files"` + PodName string `json:"podName"` + DirPath string `json:"dirPath"` + DirName string `json:"dirName"` + CreationTime string `json:"creationTime"` + ModificationTime string `json:"modificationTime"` + AccessTime string `json:"accessTime"` + NoOfDirectories string `json:"noOfDirectories"` + NoOfFiles string `json:"noOfFiles"` } // DirStat returns all the information related to a given directory. -func (d *Directory) DirStat(podName, dirNameWithPath string) (*Stats, error) { +func (d *Directory) DirStat(podName, podPassword, dirNameWithPath string) (*Stats, error) { topic := utils.HashString(dirNameWithPath) - _, data, err := d.fd.GetFeedData(topic, d.getAddress()) + _, data, err := d.fd.GetFeedData(topic, d.getAddress(), []byte(podPassword)) if err != nil { // skipcq: TCV-001 return nil, fmt.Errorf("dir stat: %v", err) } diff --git a/pkg/dir/stat_test.go b/pkg/dir/stat_test.go index 5dc5fd24..5bd0638b 100644 --- a/pkg/dir/stat_test.go +++ b/pkg/dir/stat_test.go @@ -17,10 +17,17 @@ limitations under the License. package dir_test import ( + "context" "errors" "io" "strconv" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" bm "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -34,52 +41,56 @@ func TestStat(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() t.Run("stat-dir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // populate the directory with few directory and files - err := dirObject.MkDir("/dirToStat") + err := dirObject.MkDir("/dirToStat", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/dirToStat/subDir1") + err = dirObject.MkDir("/dirToStat/subDir1", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/dirToStat/subDir2") + err = dirObject.MkDir("/dirToStat/subDir2", podPassword) if err != nil { t.Fatal(err) } // just add dummy file enty as file listing is not tested here - err = dirObject.AddEntryToDir("/dirToStat", "file1", true) + err = 
dirObject.AddEntryToDir("/dirToStat", podPassword, "file1", true) if err != nil { t.Fatal(err) } - err = dirObject.AddEntryToDir("/dirToStat", "file2", true) + err = dirObject.AddEntryToDir("/dirToStat", podPassword, "file2", true) if err != nil { t.Fatal(err) } // stat the directory - dirStats, err := dirObject.DirStat("pod1", "/dirToStat") + dirStats, err := dirObject.DirStat("pod1", podPassword, "/dirToStat") if err != nil { t.Fatal(err) } @@ -104,12 +115,12 @@ func TestStat(t *testing.T) { t.Fatalf("invalid files count") } - err = dirObject.RmDir("/dirToStat") + err = dirObject.RmDir("/dirToStat", podPassword) if err != nil { t.Fatal(err) } - _, err = dirObject.DirStat("pod1", "/dirToStat") + _, err = dirObject.DirStat("pod1", podPassword, "/dirToStat") if !errors.Is(err, dir.ErrDirectoryNotPresent) { t.Fatal("dir should not be present") } diff --git a/pkg/dir/sync.go b/pkg/dir/sync.go index 80a3d6df..eccc6118 100644 --- a/pkg/dir/sync.go +++ b/pkg/dir/sync.go @@ -17,15 +17,17 @@ limitations under the License. package dir import ( + "context" "strings" + "sync" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) // SyncDirectory syncs all the latest entries under a given directory. -func (d *Directory) SyncDirectory(dirNameWithPath string) error { +func (d *Directory) SyncDirectory(dirNameWithPath, podPassword string) error { topic := utils.HashString(utils.CombinePathAndFile(dirNameWithPath, "")) - _, data, err := d.fd.GetFeedData(topic, d.userAddress) + _, data, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return nil // pod is empty } @@ -41,17 +43,56 @@ func (d *Directory) SyncDirectory(dirNameWithPath string) error { if strings.HasPrefix(fileOrDirName, "_F_") { fileName := strings.TrimPrefix(fileOrDirName, "_F_") filePath := utils.CombinePathAndFile(dirNameWithPath, fileName) - err := d.file.LoadFileMeta(filePath) + err := d.file.LoadFileMeta(filePath, podPassword) + if err != nil { // skipcq: TCV-001 + d.logger.Errorf("loading metadata failed %s: %s", filePath, err.Error()) + } + } else if strings.HasPrefix(fileOrDirName, "_D_") { + dirName := strings.TrimPrefix(fileOrDirName, "_D_") + path := utils.CombinePathAndFile(dirNameWithPath, dirName) + d.logger.Infof(dirNameWithPath) + + err = d.SyncDirectory(path, podPassword) if err != nil { // skipcq: TCV-001 return err } + } + } + return nil +} + +// SyncDirectoryAsync syncs all the latest entries under a given directory concurrently. 
+func (d *Directory) SyncDirectoryAsync(ctx context.Context, dirNameWithPath, podPassword string, wg *sync.WaitGroup) error { + topic := utils.HashString(utils.CombinePathAndFile(dirNameWithPath, "")) + _, data, err := d.fd.GetFeedData(topic, d.userAddress, []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return nil // pod is empty + } + var dirInode Inode + err = dirInode.Unmarshal(data) + if err != nil { // skipcq: TCV-001 + d.logger.Errorf("dir sync: %v", err) + return err + } + + d.AddToDirectoryMap(dirNameWithPath, &dirInode) + for _, fileOrDirName := range dirInode.FileOrDirNames { + if strings.HasPrefix(fileOrDirName, "_F_") { + wg.Add(1) + fileName := strings.TrimPrefix(fileOrDirName, "_F_") + filePath := utils.CombinePathAndFile(dirNameWithPath, fileName) + syncTask := newSyncTask(d, filePath, podPassword, wg) + _, err = d.syncManager.Go(syncTask) + if err != nil { // skipcq: TCV-001 + return err + } } else if strings.HasPrefix(fileOrDirName, "_D_") { dirName := strings.TrimPrefix(fileOrDirName, "_D_") path := utils.CombinePathAndFile(dirNameWithPath, dirName) d.logger.Infof(dirNameWithPath) - err = d.SyncDirectory(path) + err = d.SyncDirectoryAsync(ctx, path, podPassword, wg) if err != nil { // skipcq: TCV-001 return err } diff --git a/pkg/dir/sync_test.go b/pkg/dir/sync_test.go index 79da37df..a2abb24c 100644 --- a/pkg/dir/sync_test.go +++ b/pkg/dir/sync_test.go @@ -17,8 +17,15 @@ limitations under the License. package dir_test import ( + "context" "io" + "sync" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" bm "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -33,57 +40,61 @@ func TestSync(t *testing.T) { mockClient := bm.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) mockFile := fm.NewMockFile() + tm := taskmanager.New(1, 10, time.Second*15, logger) t.Run("sync-dir", func(t *testing.T) { - dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) // make root dir so that other directories can be added - err = dirObject.MkRootDir("pod1", user, fd) + err = dirObject.MkRootDir("pod1", podPassword, user, fd) if err != nil { t.Fatal(err) } // populate the directory with few directory and files - err := dirObject.MkDir("/dirToStat") + err := dirObject.MkDir("/dirToStat", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/dirToStat/subDir1") + err = dirObject.MkDir("/dirToStat/subDir1", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/dirToStat/subDir2") + err = dirObject.MkDir("/dirToStat/subDir2", podPassword) if err != nil { t.Fatal(err) } // just add dummy file enty as file listing is not tested here - err = dirObject.AddEntryToDir("/dirToStat", "file1", true) + err = dirObject.AddEntryToDir("/dirToStat", podPassword, "file1", true) if err != nil { t.Fatal(err) } - err = dirObject.AddEntryToDir("/dirToStat", "file2", true) + err = 
dirObject.AddEntryToDir("/dirToStat", podPassword, "file2", true) if err != nil { t.Fatal(err) } - dirObject2 := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, logger) + dirObject2 := dir.NewDirectory("pod1", mockClient, fd, user, mockFile, tm, logger) if dirObject2.GetDirFromDirectoryMap("/") != nil { t.Fatal("it should be nil before sync") } - err = dirObject2.SyncDirectory("/") + wg := new(sync.WaitGroup) + err = dirObject2.SyncDirectoryAsync(context.Background(), "/", podPassword, wg) if err != nil { t.Fatal(err) } + wg.Wait() node := dirObject2.GetDirFromDirectoryMap("/") if node.GetDirInodePathAndNameForRoot() != utils.PathSeparator { t.Fatal("node is root node") diff --git a/pkg/ensm/eth/eth.go b/pkg/ensm/eth/eth.go index 53b1eb7c..f6ccd740 100644 --- a/pkg/ensm/eth/eth.go +++ b/pkg/ensm/eth/eth.go @@ -33,7 +33,7 @@ const ( var ( minRequiredBalance = big.NewInt(10000000000000000) // 0.01 eth - //ErrWrongChainID denotes the rpc endpoint returned different chainId than the configured one + // ErrWrongChainID denotes the rpc endpoint returned different chainId than the configured one ErrWrongChainID = fmt.Errorf("chainID does not match or not supported") ) diff --git a/pkg/feed/api.go b/pkg/feed/api.go index 0f0a4c11..66ecdda7 100644 --- a/pkg/feed/api.go +++ b/pkg/feed/api.go @@ -59,7 +59,7 @@ type API struct { // request is a custom type that involves in the fairOS feed creation type request struct { ID - //User utils.Address + // User utils.Address idAddr swarm.Address // cached chunk address for the update (not serialized, for internal use) data []byte // actual data payload @@ -80,7 +80,7 @@ func New(accountInfo *account.Info, client blockstore.Client, logger logging.Log // CreateFeed creates a feed by constructing a single owner chunk. This chunk // can only be accessed if the pod address is known. Also, no one else can spoof this // chunk since this is signed by the pod. 
-func (a *API) CreateFeed(topic []byte, user utils.Address, data []byte) ([]byte, error) { +func (a *API) CreateFeed(topic []byte, user utils.Address, data []byte, encryptionPassword []byte) ([]byte, error) { var req request if a.accountInfo.GetPrivateKey() == nil { @@ -95,6 +95,16 @@ func (a *API) CreateFeed(topic []byte, user utils.Address, data []byte) ([]byte, return nil, ErrInvalidPayloadSize } + var err error + + encryptedData := data + if encryptionPassword != nil { // skipcq: TCV-001 + encryptedData, err = utils.EncryptBytes(encryptionPassword, data) + if err != nil { // skipcq: TCV-001 + return nil, err + } + } + // fill Feed and Epoc related details copy(req.ID.Topic[:], topic) req.ID.User = user @@ -102,7 +112,7 @@ func (a *API) CreateFeed(topic []byte, user utils.Address, data []byte) ([]byte, req.Epoch.Time = uint64(time.Now().Unix()) // Add initial feed data - req.data = data + req.data = encryptedData // create the id, hash(topic, epoc) id, err := a.handler.getId(req.Topic, req.Time, req.Level) @@ -111,14 +121,14 @@ func (a *API) CreateFeed(topic []byte, user utils.Address, data []byte) ([]byte, } // get the payload id BMT(span, payload) - payloadId, err := a.handler.getPayloadId(data) + payloadId, err := a.handler.getPayloadId(encryptedData) if err != nil { // skipcq: TCV-001 return nil, err } // create the signer and the content addressed chunk signer := crypto.NewDefaultSigner(a.accountInfo.GetPrivateKey()) - ch, err := utils.NewChunkWithSpan(data) + ch, err := utils.NewChunkWithSpan(encryptedData) if err != nil { // skipcq: TCV-001 return nil, err } @@ -213,7 +223,7 @@ func (a *API) GetSOCFromAddress(address []byte) ([]byte, error) { } // GetFeedData looks up feed from swarm -func (a *API) GetFeedData(topic []byte, user utils.Address) ([]byte, []byte, error) { +func (a *API) GetFeedData(topic []byte, user utils.Address, encryptionPassword []byte) ([]byte, []byte, error) { if len(topic) != TopicLength { return nil, nil, ErrInvalidTopicSize } @@ -230,8 +240,14 @@ func (a *API) GetFeedData(topic []byte, user utils.Address) ([]byte, []byte, err if err != nil { return nil, nil, err } - var data []byte - addr, data, err := a.handler.GetContent(&q.Feed) + addr, encryptedData, err := a.handler.GetContent(&q.Feed) + if err != nil { // skipcq: TCV-001 + return nil, nil, err + } + if encryptionPassword == nil || string(encryptedData) == utils.DeletedFeedMagicWord { + return addr.Bytes(), encryptedData, nil + } + data, err := utils.DecryptBytes(encryptionPassword, encryptedData) if err != nil { // skipcq: TCV-001 return nil, nil, err } @@ -264,7 +280,7 @@ func (a *API) GetFeedDataFromTopic(topic []byte, user utils.Address) ([]byte, [] } // UpdateFeed updates the contents of an already created feed. 
-func (a *API) UpdateFeed(topic []byte, user utils.Address, data []byte) ([]byte, error) { +func (a *API) UpdateFeed(topic []byte, user utils.Address, data []byte, encryptionPassword []byte) ([]byte, error) { if a.accountInfo.GetPrivateKey() == nil { return nil, ErrReadOnlyFeed } @@ -277,6 +293,16 @@ func (a *API) UpdateFeed(topic []byte, user utils.Address, data []byte) ([]byte, return nil, ErrInvalidPayloadSize } + var err error + + encryptedData := data + if encryptionPassword != nil && string(data) != utils.DeletedFeedMagicWord { + encryptedData, err = utils.EncryptBytes(encryptionPassword, data) + if err != nil { // skipcq: TCV-001 + return nil, err + } + } + ctx := context.Background() f := new(Feed) f.User = user @@ -288,7 +314,7 @@ func (a *API) UpdateFeed(topic []byte, user utils.Address, data []byte) ([]byte, return nil, err } req.Time = uint64(time.Now().Unix()) - req.data = data + req.data = encryptedData // create the id, hash(topic, epoc) id, err := a.handler.getId(req.Topic, req.Time, req.Level) @@ -297,14 +323,14 @@ func (a *API) UpdateFeed(topic []byte, user utils.Address, data []byte) ([]byte, } // get the payload id BMT(span, payload) - payloadId, err := a.handler.getPayloadId(data) + payloadId, err := a.handler.getPayloadId(encryptedData) if err != nil { // skipcq: TCV-001 return nil, err } // create the signer and the content addressed chunk signer := crypto.NewDefaultSigner(a.accountInfo.GetPrivateKey()) - ch, err := utils.NewChunkWithSpan(data) + ch, err := utils.NewChunkWithSpan(encryptedData) if err != nil { // skipcq: TCV-001 return nil, err } @@ -349,7 +375,7 @@ func (a *API) DeleteFeed(topic []byte, user utils.Address) error { return ErrReadOnlyFeed } - delRef, _, err := a.GetFeedData(topic, user) + delRef, _, err := a.GetFeedData(topic, user, nil) if err != nil && err.Error() != "feed does not exist or was not updated yet" { // skipcq: TCV-001 return err } diff --git a/pkg/feed/feed_test.go b/pkg/feed/feed_test.go index c94b801e..bd82574c 100644 --- a/pkg/feed/feed_test.go +++ b/pkg/feed/feed_test.go @@ -34,7 +34,7 @@ func TestFeed(t *testing.T) { logger := logging.New(io.Discard, 0) acc1 := account.New(logger) - _, _, err := acc1.CreateUserAccount("password", "") + _, _, err := acc1.CreateUserAccount("") if err != nil { t.Fatal(err) } @@ -46,18 +46,18 @@ func TestFeed(t *testing.T) { fd := New(accountInfo1, client, logger) topic := utils.HashString("topic1") data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - addr, err := fd.CreateFeed(topic, user1, data) + addr, err := fd.CreateFeed(topic, user1, data, nil) if err != nil { t.Fatal(err) } longTopic := append(topic, topic...) 
// skipcq: CRT-D0001 - _, _, err = fd.GetFeedData(longTopic, user1) + _, _, err = fd.GetFeedData(longTopic, user1, nil) if !errors.Is(err, ErrInvalidTopicSize) { t.Fatal("invalid topic size") } // check if the data and address is present and is same as stored - rcvdAddr, rcvdData, err := fd.GetFeedData(topic, user1) + rcvdAddr, rcvdData, err := fd.GetFeedData(topic, user1, nil) if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestFeed(t *testing.T) { t.Run("create-from-user1-read-from-user2-with-user1-address", func(t *testing.T) { // create account2 acc2 := account.New(logger) - _, _, err = acc2.CreateUserAccount("password", "") + _, _, err = acc2.CreateUserAccount("") if err != nil { t.Fatal(err) } @@ -82,14 +82,14 @@ func TestFeed(t *testing.T) { fd1 := New(accountInfo1, client, logger) topic := utils.HashString("topic1") data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - addr, err := fd1.CreateFeed(topic, user1, data) + addr, err := fd1.CreateFeed(topic, user1, data, nil) if err != nil { t.Fatal(err) } // check if you can read the data from user2 fd2 := New(accountInfo2, client, logger) - rcvdAddr, rcvdData, err := fd2.GetFeedData(topic, user1) + rcvdAddr, rcvdData, err := fd2.GetFeedData(topic, user1, nil) if err != nil { t.Fatal(err) } @@ -106,7 +106,7 @@ func TestFeed(t *testing.T) { topic := utils.HashString("topic2") // check if the data and address is present and is same as stored - _, _, err := fd.GetFeedData(topic, user1) + _, _, err := fd.GetFeedData(topic, user1, nil) if err != nil && err.Error() != "feed does not exist or was not updated yet" { t.Fatal(err) } @@ -115,7 +115,7 @@ func TestFeed(t *testing.T) { t.Run("create-from-user1-read-from-user2-with-user2-address", func(t *testing.T) { // create account2 acc2 := account.New(logger) - _, _, err = acc2.CreateUserAccount("password", "") + _, _, err = acc2.CreateUserAccount("") if err != nil { t.Fatal(err) } @@ -126,14 +126,14 @@ func TestFeed(t *testing.T) { fd1 := New(accountInfo1, client, logger) topic := utils.HashString("topic1") data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - _, err := fd1.CreateFeed(topic, user1, data) + _, err := fd1.CreateFeed(topic, user1, data, nil) if err != nil { t.Fatal(err) } // check if you can read the data from user2 fd2 := New(accountInfo2, client, logger) - rcvdAddr, rcvdData, err := fd2.GetFeedData(topic, user2) + rcvdAddr, rcvdData, err := fd2.GetFeedData(topic, user2, nil) if err != nil && err.Error() != "feed does not exist or was not updated yet" { t.Fatal(err) } @@ -146,7 +146,7 @@ func TestFeed(t *testing.T) { fd := New(accountInfo1, client, logger) topic := utils.HashString("topic3") data := []byte{0} - _, err = fd.CreateFeed(topic, user1, data) + _, err = fd.CreateFeed(topic, user1, data, nil) if err != nil { t.Fatal(err) } @@ -155,11 +155,11 @@ func TestFeed(t *testing.T) { buf := make([]byte, 4) binary.LittleEndian.PutUint16(buf, uint16(i)) - _, err = fd.UpdateFeed(topic, user1, buf) + _, err = fd.UpdateFeed(topic, user1, buf, nil) if err != nil { t.Fatal(err) } - getAddr, rcvdData, err := fd.GetFeedData(topic, user1) + getAddr, rcvdData, err := fd.GetFeedData(topic, user1, nil) if err != nil { t.Fatal(err) } @@ -226,13 +226,13 @@ func TestFeed(t *testing.T) { topic := utils.HashString("feed-topic1") data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - _, err = nilFd.CreateFeed(topic, user1, data) + _, err = nilFd.CreateFeed(topic, user1, data, nil) if !errors.Is(err, ErrReadOnlyFeed) { t.Fatal("read only feed") } longTopic := append(topic, topic...) 
// skipcq: CRT-D0001 - _, err = fd.CreateFeed(longTopic, user1, data) + _, err = fd.CreateFeed(longTopic, user1, data, nil) if !errors.Is(err, ErrInvalidTopicSize) { t.Fatal("invalid topic size") } @@ -241,7 +241,7 @@ func TestFeed(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = fd.CreateFeed(topic, user1, longData) + _, err = fd.CreateFeed(topic, user1, longData, nil) if !errors.Is(err, ErrInvalidPayloadSize) { t.Fatal("invalid payload size") } @@ -282,13 +282,13 @@ func TestFeed(t *testing.T) { topic := utils.HashString("feed-topic1") data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - _, err = nilFd.UpdateFeed(topic, user1, data) + _, err = nilFd.UpdateFeed(topic, user1, data, nil) if !errors.Is(err, ErrReadOnlyFeed) { t.Fatal("read only feed") } longTopic := append(topic, topic...) // skipcq: CRT-D0001 - _, err = fd.UpdateFeed(longTopic, user1, data) + _, err = fd.UpdateFeed(longTopic, user1, data, nil) if !errors.Is(err, ErrInvalidTopicSize) { t.Fatal("invalid topic size") } @@ -297,7 +297,7 @@ func TestFeed(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = fd.UpdateFeed(topic, user1, longData) + _, err = fd.UpdateFeed(topic, user1, longData, nil) if !errors.Is(err, ErrInvalidPayloadSize) { t.Fatal("invalid payload size") } @@ -315,7 +315,7 @@ func TestFeed(t *testing.T) { t.Fatal("read only feed") } - _, err = fd.CreateFeed(topic, user1, data) + _, err = fd.CreateFeed(topic, user1, data, nil) if err != nil { t.Fatal(err) } diff --git a/pkg/feed/handler.go b/pkg/feed/handler.go index 73cd9792..abe030d6 100644 --- a/pkg/feed/handler.go +++ b/pkg/feed/handler.go @@ -287,7 +287,11 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request2 *request feedUpdate, err := h.Lookup(ctx, query) if err != nil { - if err.(*Error).code != errNotFound { + feedErr, ok := err.(*Error) + if !ok { + return nil, err + } + if feedErr.code != errNotFound { return nil, err } // not finding updates means that there is a network error diff --git a/pkg/feed/lookup/algorithm_longearth.go b/pkg/feed/lookup/algorithm_longearth.go index ba98abe2..8657438f 100644 --- a/pkg/feed/lookup/algorithm_longearth.go +++ b/pkg/feed/lookup/algorithm_longearth.go @@ -83,7 +83,7 @@ func LongEarthAlgorithm(ctx context.Context, now uint64, hint Epoch, read ReadFu valueBMu.Unlock() } - go func() { //goroutine to read the current epoch (R) + go func() { // goroutine to read the current epoch (R) defer cancelR() var err error valuer, err := read(ctxR, epoch, now) // read this epoch @@ -94,7 +94,7 @@ func LongEarthAlgorithm(ctx context.Context, now uint64, hint Epoch, read ReadFu cancelA() } else { cancelB() - //cancelA() // cancel this also for faster eject + // cancelA() // cancel this also for faster eject } if err != nil && !errors.Is(err, context.Canceled) { // skipcq: TCV-001 gerr = err diff --git a/pkg/feed/lookup/lookup.go b/pkg/feed/lookup/lookup.go index 0b0e2a26..8cdd346e 100644 --- a/pkg/feed/lookup/lookup.go +++ b/pkg/feed/lookup/lookup.go @@ -37,7 +37,7 @@ const HighestLevel = 31 // DefaultLevel sets what level will be chosen to search when there is no hint const DefaultLevel = HighestLevel -//Algorithm is the function signature of a lookup algorithm +// Algorithm is the function signature of a lookup algorithm type Algorithm func(ctx context.Context, now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) // Lookup finds the update with the highest timestamp that is smaller or equal than 'now' @@ -135,5 +135,5 @@ func GetFirstEpoch(now uint64) Epoch { var worstHint = 
Epoch{Time: 0, Level: 63} var trace = func(id int32, formatString string, a ...interface{}) { - //fmt.Printf("Step ID #%d "+formatString+"\n", append([]interface{}{id}, a...)...) + // fmt.Printf("Step ID #%d "+formatString+"\n", append([]interface{}{id}, a...)...) } diff --git a/pkg/feed/lookup/lookup_test.go b/pkg/feed/lookup/lookup_test.go index 5948c92c..4fe9bc96 100644 --- a/pkg/feed/lookup/lookup_test.go +++ b/pkg/feed/lookup/lookup_test.go @@ -72,7 +72,7 @@ func TestLookup(t *testing.T) { for i := uint64(0); i < 12; i++ { t := now - Year*3 + i*Month data := Data{ - Payload: t, //our "payload" will be the timestamp itself. + Payload: t, // our "payload" will be the timestamp itself. Time: t, } epoch = store.Update(epoch, t, &data) @@ -154,7 +154,7 @@ func TestOneUpdateAt0(t *testing.T) { Payload: 79, Time: 0, } - store.Update(epoch, 0, &data) //place 1 update in t=0 + store.Update(epoch, 0, &data) // place 1 update in t=0 // ### 2.- Test all algorithms for _, algo := range algorithms { @@ -288,7 +288,7 @@ func TestContextCancellation(t *testing.T) { errc <- err }() - cancel() //actually cancel the lookup + cancel() // actually cancel the lookup if err := <-errc; err != context.Canceled { t.Fatalf("Expected lookup to return a context canceled error, got %v", err) @@ -366,7 +366,7 @@ func TestHighFreqUpdates(t *testing.T) { for i := uint64(0); i <= 994; i++ { T := now - 1000 + i data := Data{ - Payload: T, //our "payload" will be the timestamp itself. + Payload: T, // our "payload" will be the timestamp itself. Time: T, } epoch = store.Update(epoch, T, &data) @@ -448,7 +448,7 @@ func TestSparseUpdates(t *testing.T) { for j := uint64(0); j < 10; j++ { T := Year*5*i + j // write a burst of 10 updates every 5 years 3 times starting in Jan 1st 1970 and then silence data := Data{ - Payload: T, //our "payload" will be the timestamp itself. + Payload: T, // our "payload" will be the timestamp itself. Time: T, } epoch = store.Update(epoch, T, &data) diff --git a/pkg/file/IFile.go b/pkg/file/IFile.go index 63dadfb1..7a6e7862 100644 --- a/pkg/file/IFile.go +++ b/pkg/file/IFile.go @@ -19,10 +19,10 @@ package file import "io" type IFile interface { - Upload(fd io.Reader, podFileName string, fileSize int64, blockSize uint32, podPath, compression string) error - Download(podFileWithPath string) (io.ReadCloser, uint64, error) - ListFiles(files []string) ([]Entry, error) - GetStats(podName, podFileWithPath string) (*Stats, error) - RmFile(podFileWithPath string) error - LoadFileMeta(fileNameWithPath string) error + Upload(fd io.Reader, podFileName string, fileSize int64, blockSize uint32, podPath, compression, podPassword string) error + Download(podFileWithPath, podPassword string) (io.ReadCloser, uint64, error) + ListFiles(files []string, podPassword string) ([]Entry, error) + GetStats(podName, podFileWithPath, podPassword string) (*Stats, error) + RmFile(podFileWithPath, podPassword string) error + LoadFileMeta(fileNameWithPath, podPassword string) error } diff --git a/pkg/file/download.go b/pkg/file/download.go index 2f02f8f3..8a9eb425 100644 --- a/pkg/file/download.go +++ b/pkg/file/download.go @@ -38,7 +38,7 @@ var ( // Download does all the validation for the existence of the file and creates a // Reader to read the contents of the file from the pod. 
-func (f *File) Download(podFileWithPath string) (io.ReadCloser, uint64, error) { +func (f *File) Download(podFileWithPath, podPassword string) (io.ReadCloser, uint64, error) { // check if file present totalFilePath := utils.CombinePathAndFile(podFileWithPath, "") if !f.IsFileAlreadyPresent(totalFilePath) { @@ -49,26 +49,71 @@ func (f *File) Download(podFileWithPath string) (io.ReadCloser, uint64, error) { if meta == nil { // skipcq: TCV-001 return nil, 0, ErrFileNotFound } + encryptedFileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) + if err != nil { // skipcq: TCV-001 + return nil, 0, err + } + fileInodeBytes, err := utils.DecryptBytes([]byte(podPassword), encryptedFileInodeBytes) + if err != nil { // skipcq: TCV-001 + return nil, 0, err + } + var fileInode INode + err = json.Unmarshal(fileInodeBytes, &fileInode) + if err != nil { // skipcq: TCV-001 + return nil, 0, err + } + + // need to change the access time for podFile if it is owned by user + if !f.fd.IsReadOnlyFeed() { + meta.AccessTime = time.Now().Unix() + err = f.updateMeta(meta, podPassword) + if err != nil { // skipcq: TCV-001 + return nil, 0, err + } + } - fileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) + reader := NewReader(fileInode, f.getClient(), meta.Size, meta.BlockSize, meta.Compression, podPassword, false) + return reader, meta.Size, nil +} + +// ReadSeeker does all the validation for the existence of the file and creates a +// ReadSeekCloser to read the contents of the file from the pod. +func (f *File) ReadSeeker(podFileWithPath, podPassword string) (io.ReadSeekCloser, uint64, error) { + // check if file present + totalFilePath := utils.CombinePathAndFile(podFileWithPath, "") + if !f.IsFileAlreadyPresent(totalFilePath) { + return nil, 0, ErrFileNotPresent + } + + meta := f.GetFromFileMap(totalFilePath) + if meta == nil { // skipcq: TCV-001 + return nil, 0, ErrFileNotFound + } + + encryptedFileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) if err != nil { // skipcq: TCV-001 return nil, 0, err } + fileInodeBytes, err := utils.DecryptBytes([]byte(podPassword), encryptedFileInodeBytes) + if err != nil { // skipcq: TCV-001 + return nil, 0, err + } + var fileInode INode err = json.Unmarshal(fileInodeBytes, &fileInode) if err != nil { // skipcq: TCV-001 return nil, 0, err } - //need to change the access time for podFile if it is owned by user + // need to change the access time for podFile if it is owned by user if !f.fd.IsReadOnlyFeed() { meta.AccessTime = time.Now().Unix() - err = f.updateMeta(meta) + err = f.updateMeta(meta, podPassword) if err != nil { // skipcq: TCV-001 return nil, 0, err } } - reader := NewReader(fileInode, f.getClient(), meta.Size, meta.BlockSize, meta.Compression, false) + reader := NewReader(fileInode, f.getClient(), meta.Size, meta.BlockSize, meta.Compression, podPassword, false) return reader, meta.Size, nil } diff --git a/pkg/file/download_test.go b/pkg/file/download_test.go index 6a58fe69..9920231e 100644 --- a/pkg/file/download_test.go +++ b/pkg/file/download_test.go @@ -18,8 +18,14 @@ package file_test import ( "bytes" + "context" "io" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -33,42 +39,103 @@ func TestDownload(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := 
acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() t.Run("download-small-file", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + filePath := "/dir1" fileName := "file1" compression := "" fileSize := int64(100) blockSize := uint32(10) - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + + // file existent check + podFile := utils.CombinePathAndFile(filePath, fileName) + if fileObject.IsFileAlreadyPresent(podFile) { + t.Fatal("file should not be present") + } + _, _, err = fileObject.Download(podFile, podPassword) + if err == nil { + t.Fatal("file should not be present for download") + } + // upload a file + content, err := uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) + if err != nil { + t.Fatal(err) + } + + // Download the file and read from reader + reader, _, err := fileObject.Download(podFile, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + + // Download the file and read from reader + reader2, rcvdSize2, err := fileObject.Download(podFile, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer2 := new(bytes.Buffer) + _, err = rcvdBuffer2.ReadFrom(reader2) + if err != nil { + t.Fatal(err) + } + + // validate the result + if len(rcvdBuffer2.Bytes()) != len(content) || int(rcvdSize2) != len(content) { + t.Fatalf("downloaded content size is invalid") + } + if !bytes.Equal(content, rcvdBuffer2.Bytes()) { + t.Fatalf("downloaded content is not equal") + } + + }) + + t.Run("download-small-file-gzip", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + filePath := "/dir1" + fileName := "file1" + compression := "gzip" + fileSize := int64(100) + blockSize := uint32(164000) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) // file existent check podFile := utils.CombinePathAndFile(filePath, fileName) if fileObject.IsFileAlreadyPresent(podFile) { t.Fatal("file should not be present") } - _, _, err = fileObject.Download(podFile) + _, _, err = fileObject.Download(podFile, podPassword) if err == nil { t.Fatal("file should not be present for download") } // upload a file - content, err := uploadFile(t, fileObject, filePath, fileName, compression, fileSize, blockSize) + content, err := uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) if err != nil { t.Fatal(err) } // Download the file and read from reader - reader, rcvdSize, err := fileObject.Download(podFile) + reader, rcvdSize, err := fileObject.Download(podFile, podPassword) if err != nil { t.Fatal(err) } diff --git a/pkg/file/file.go b/pkg/file/file.go index ec0eed05..fc1c29f4 100644 --- a/pkg/file/file.go +++ b/pkg/file/file.go @@ -17,8 +17,14 @@ limitations under the License. 
package file import ( + "context" + "encoding/json" + "fmt" + "strconv" "sync" + "github.com/fairdatasociety/fairOS-dfs/pkg/taskmanager" + "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" "github.com/fairdatasociety/fairOS-dfs/pkg/feed" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" @@ -34,10 +40,12 @@ type File struct { fileMap map[string]*MetaData fileMu *sync.RWMutex logger logging.Logger + syncManager taskmanager.TaskManagerGO } // NewFile creates the base file object which has all the methods related to file manipulation. -func NewFile(podName string, client blockstore.Client, fd *feed.API, user utils.Address, logger logging.Logger) *File { +func NewFile(podName string, client blockstore.Client, fd *feed.API, user utils.Address, + m taskmanager.TaskManagerGO, logger logging.Logger) *File { return &File{ podName: podName, userAddress: user, @@ -46,6 +54,7 @@ func NewFile(podName string, client blockstore.Client, fd *feed.API, user utils. fileMap: make(map[string]*MetaData), fileMu: &sync.RWMutex{}, logger: logger, + syncManager: m, } } @@ -93,3 +102,58 @@ func (f *File) RemoveAllFromFileMap() { defer f.fileMu.Unlock() f.fileMap = make(map[string]*MetaData) } + +type lsTask struct { + f *File + topic []byte + path string + podPassword string + entries *[]Entry + mtx sync.Locker + wg *sync.WaitGroup +} + +func newLsTask(f *File, topic []byte, path, podPassword string, l *[]Entry, mtx sync.Locker, wg *sync.WaitGroup) *lsTask { + return &lsTask{ + f: f, + topic: topic, + path: path, + entries: l, + mtx: mtx, + wg: wg, + podPassword: podPassword, + } +} + +func (lt *lsTask) Execute(context.Context) error { + defer lt.wg.Done() + _, data, err := lt.f.fd.GetFeedData(lt.topic, lt.f.userAddress, []byte(lt.podPassword)) + if err != nil { // skipcq: TCV-001 + return fmt.Errorf("file mtdt : %v", err) + } + if string(data) == utils.DeletedFeedMagicWord { // skipcq: TCV-001 + return nil + } + var meta *MetaData + err = json.Unmarshal(data, &meta) + if err != nil { // skipcq: TCV-001 + return fmt.Errorf("file mtdt : %v", err) + } + entry := Entry{ + Name: meta.Name, + ContentType: meta.ContentType, + Size: strconv.FormatUint(meta.Size, 10), + BlockSize: strconv.FormatInt(int64(meta.BlockSize), 10), + CreationTime: strconv.FormatInt(meta.CreationTime, 10), + AccessTime: strconv.FormatInt(meta.AccessTime, 10), + ModificationTime: strconv.FormatInt(meta.ModificationTime, 10), + } + lt.mtx.Lock() + defer lt.mtx.Unlock() + *lt.entries = append(*lt.entries, entry) + return nil +} + +func (lt *lsTask) Name() string { + return lt.path +} diff --git a/pkg/file/inode.go b/pkg/file/inode.go index b9e99ad0..64a0708f 100644 --- a/pkg/file/inode.go +++ b/pkg/file/inode.go @@ -19,12 +19,11 @@ package file import "github.com/fairdatasociety/fairOS-dfs/pkg/utils" type INode struct { - Blocks []*BlockInfo + Blocks []*BlockInfo `json:"blocks"` } type BlockInfo struct { - Name string - Size uint32 - CompressedSize uint32 - Reference utils.Reference + Size uint32 `json:"size"` + CompressedSize uint32 `json:"compressedSize"` + Reference utils.Reference `json:"reference"` } diff --git a/pkg/file/ls.go b/pkg/file/ls.go index 95069bf9..30041820 100644 --- a/pkg/file/ls.go +++ b/pkg/file/ls.go @@ -17,46 +17,36 @@ limitations under the License. 
package file import ( - "encoding/json" - "strconv" + "fmt" + "sync" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) type Entry struct { Name string `json:"name"` - ContentType string `json:"content_type"` + ContentType string `json:"contentType"` Size string `json:"size,omitempty"` - BlockSize string `json:"block_size,omitempty"` - CreationTime string `json:"creation_time"` - ModificationTime string `json:"modification_time"` - AccessTime string `json:"access_time"` + BlockSize string `json:"blockSize,omitempty"` + CreationTime string `json:"creationTime"` + ModificationTime string `json:"modificationTime"` + AccessTime string `json:"accessTime"` } // ListFiles given a list of files, list files gives back the information related to each file. -func (f *File) ListFiles(files []string) ([]Entry, error) { - var fileEntries []Entry +func (f *File) ListFiles(files []string, podPassword string) ([]Entry, error) { + fileEntries := &[]Entry{} + wg := new(sync.WaitGroup) + mtx := &sync.Mutex{} for _, filePath := range files { fileTopic := utils.HashString(utils.CombinePathAndFile(filePath, "")) - _, data, err := f.fd.GetFeedData(fileTopic, f.userAddress) + wg.Add(1) + lsTask := newLsTask(f, fileTopic, filePath, podPassword, fileEntries, mtx, wg) + _, err := f.syncManager.Go(lsTask) if err != nil { // skipcq: TCV-001 - continue + return nil, fmt.Errorf("list files : %v", err) } - var meta *MetaData - err = json.Unmarshal(data, &meta) - if err != nil { // skipcq: TCV-001 - continue - } - entry := Entry{ - Name: meta.Name, - ContentType: meta.ContentType, - Size: strconv.FormatUint(meta.Size, 10), - BlockSize: strconv.FormatInt(int64(meta.BlockSize), 10), - CreationTime: strconv.FormatInt(meta.CreationTime, 10), - AccessTime: strconv.FormatInt(meta.AccessTime, 10), - ModificationTime: strconv.FormatInt(meta.ModificationTime, 10), - } - fileEntries = append(fileEntries, entry) } - return fileEntries, nil + wg.Wait() + return *fileEntries, nil } diff --git a/pkg/file/ls_test.go b/pkg/file/ls_test.go index 3021aab4..46d400c2 100644 --- a/pkg/file/ls_test.go +++ b/pkg/file/ls_test.go @@ -17,9 +17,16 @@ limitations under the License. 
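Note that with each metadata lookup running as a separate task, `ListFiles` no longer returns entries in the order of the input paths, which is why the test below searches the result by name. A hypothetical helper for callers that want keyed access:

```
package file_test

import "github.com/fairdatasociety/fairOS-dfs/pkg/file"

// entriesByName indexes ListFiles results by file name; the concurrent
// lookups return entries in completion order, not input order.
func entriesByName(entries []file.Entry) map[string]file.Entry {
	byName := make(map[string]file.Entry, len(entries))
	for _, e := range entries {
		byName[e.Name] = e
	}
	return byName
}
```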
package file_test import ( + "context" "io" "strconv" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -32,45 +39,78 @@ func TestListFiles(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() t.Run("list-file", func(t *testing.T) { - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) // upload few files - _, err = uploadFile(t, fileObject, "/dir1", "file1", "", 100, 10) + _, err = uploadFile(t, fileObject, "/dir1", "file1", "", podPassword, 100, 10) if err != nil { t.Fatal(err) } - _, err = uploadFile(t, fileObject, "/dir1", "file2", "", 200, 20) + _, err = uploadFile(t, fileObject, "/dir1", "file2", "", podPassword, 200, 20) if err != nil { t.Fatal(err) } - _, err = uploadFile(t, fileObject, "/dir1", "file3", "", 300, 30) + _, err = uploadFile(t, fileObject, "/dir1", "file3", "", podPassword, 300, 30) if err != nil { t.Fatal(err) } - //list the files + // list the files fileList := []string{"/dir1/file1", "/dir1/file2", "/dir1/file3"} - entries, err := fileObject.ListFiles(fileList) + entries, err := fileObject.ListFiles(fileList, podPassword) if err != nil { t.Fatal(err) } + foundIndex1 := -1 + for i, v := range entries { + if v.Name == "file1" { + foundIndex1 = i + } + } + + if foundIndex1 < 0 { + t.Fatal("file1 not found") + } + foundIndex2 := -1 + for i, v := range entries { + if v.Name == "file2" { + foundIndex2 = i + } + } + if foundIndex2 < 0 { + t.Fatal("file1 not found") + } + foundIndex3 := -1 + for i, v := range entries { + if v.Name == "file3" { + foundIndex3 = i + } + } + if foundIndex3 < 0 { + t.Fatal("file1 not found") + } + // validate the entries - entry := entries[0] + entry := entries[foundIndex1] if entry.Name != "file1" { t.Fatalf("invalid name") } @@ -80,7 +120,7 @@ func TestListFiles(t *testing.T) { if entry.BlockSize != strconv.FormatUint(10, 10) { t.Fatalf("invalid block size") } - entry = entries[1] + entry = entries[foundIndex2] if entry.Name != "file2" { t.Fatalf("invalid name") } @@ -90,7 +130,7 @@ func TestListFiles(t *testing.T) { if entry.BlockSize != strconv.FormatUint(20, 10) { t.Fatalf("invalid block size") } - entry = entries[2] + entry = entries[foundIndex3] if entry.Name != "file3" { t.Fatalf("invalid name") } diff --git a/pkg/file/meta.go b/pkg/file/meta.go index 91f9b90f..05f18143 100644 --- a/pkg/file/meta.go +++ b/pkg/file/meta.go @@ -20,36 +20,35 @@ import ( "encoding/json" "errors" "fmt" + "path/filepath" "time" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) var ( - MetaVersion uint8 = 1 + MetaVersion uint8 = 2 ErrDeletedFeed = errors.New("deleted feed") ) type MetaData struct { - Version uint8 `json:"version"` - UserAddress 
utils.Address `json:"user_address"` - PodName string `json:"pod_name"` - Path string `json:"file_path"` - Name string `json:"file_name"` - Size uint64 `json:"file_size"` - BlockSize uint32 `json:"block_size"` - ContentType string `json:"content_type"` - Compression string `json:"compression"` - CreationTime int64 `json:"creation_time"` - AccessTime int64 `json:"access_time"` - ModificationTime int64 `json:"modification_time"` - InodeAddress []byte `json:"file_inode_reference"` + Version uint8 `json:"version"` + Path string `json:"filePath"` + Name string `json:"fileName"` + Size uint64 `json:"fileSize"` + BlockSize uint32 `json:"blockSize"` + ContentType string `json:"contentType"` + Compression string `json:"compression"` + CreationTime int64 `json:"creationTime"` + AccessTime int64 `json:"accessTime"` + ModificationTime int64 `json:"modificationTime"` + InodeAddress []byte `json:"fileInodeReference"` } // LoadFileMeta is used in syncing -func (f *File) LoadFileMeta(fileNameWithPath string) error { - meta, err := f.GetMetaFromFileName(fileNameWithPath, f.userAddress) +func (f *File) LoadFileMeta(fileNameWithPath, podPassword string) error { + meta, err := f.GetMetaFromFileName(fileNameWithPath, podPassword, f.userAddress) if err != nil { // skipcq: TCV-001 if err == ErrDeletedFeed { return nil @@ -61,19 +60,19 @@ func (f *File) LoadFileMeta(fileNameWithPath string) error { return nil } -func (f *File) handleMeta(meta *MetaData) error { +func (f *File) handleMeta(meta *MetaData, podPassword string) error { // check if meta is present. totalPath := utils.CombinePathAndFile(meta.Path, meta.Name) - _, err := f.GetMetaFromFileName(totalPath, meta.UserAddress) + _, err := f.GetMetaFromFileName(totalPath, podPassword, f.userAddress) if err != nil { if err != ErrDeletedFeed { - return f.uploadMeta(meta) + return f.uploadMeta(meta, podPassword) } } - return f.updateMeta(meta) + return f.updateMeta(meta, podPassword) } -func (f *File) uploadMeta(meta *MetaData) error { +func (f *File) uploadMeta(meta *MetaData, podPassword string) error { // marshall the meta structure fileMetaBytes, err := json.Marshal(meta) if err != nil { // skipcq: TCV-001 @@ -83,7 +82,7 @@ func (f *File) uploadMeta(meta *MetaData) error { // put the file meta as a feed totalPath := utils.CombinePathAndFile(meta.Path, meta.Name) topic := utils.HashString(totalPath) - _, err = f.fd.CreateFeed(topic, meta.UserAddress, fileMetaBytes) + _, err = f.fd.CreateFeed(topic, f.userAddress, fileMetaBytes, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } @@ -91,13 +90,22 @@ func (f *File) uploadMeta(meta *MetaData) error { return nil } -func (f *File) deleteMeta(meta *MetaData) error { +func (f *File) deleteMeta(meta *MetaData, podPassword string) error { totalPath := utils.CombinePathAndFile(meta.Path, meta.Name) topic := utils.HashString(totalPath) - return f.fd.DeleteFeed(topic, meta.UserAddress) + // update with utils.DeletedFeedMagicWord + _, err := f.fd.UpdateFeed(topic, f.userAddress, []byte(utils.DeletedFeedMagicWord), []byte(podPassword)) + if err != nil { // skipcq: TCV-001 + return err + } + err = f.fd.DeleteFeed(topic, f.userAddress) + if err != nil { + f.logger.Warningf("failed to remove file feed %s", totalPath) + } + return nil } -func (f *File) updateMeta(meta *MetaData) error { +func (f *File) updateMeta(meta *MetaData, podPassword string) error { // marshall the meta structure fileMetaBytes, err := json.Marshal(meta) if err != nil { // skipcq: TCV-001 @@ -107,7 +115,7 @@ func (f *File) updateMeta(meta 
*MetaData) error { // put the file meta as a feed totalPath := utils.CombinePathAndFile(meta.Path, meta.Name) topic := utils.HashString(totalPath) - _, err = f.fd.UpdateFeed(topic, meta.UserAddress, fileMetaBytes) + _, err = f.fd.UpdateFeed(topic, f.userAddress, fileMetaBytes, []byte(podPassword)) if err != nil { // skipcq: TCV-001 return err } @@ -115,13 +123,13 @@ func (f *File) updateMeta(meta *MetaData) error { return nil } -func (f *File) BackupFromFileName(fileNameWithPath string) (*MetaData, error) { - p, err := f.GetMetaFromFileName(fileNameWithPath, f.userAddress) +func (f *File) BackupFromFileName(fileNameWithPath, podPassword string) (*MetaData, error) { + p, err := f.GetMetaFromFileName(fileNameWithPath, podPassword, f.userAddress) if err != nil { return nil, err } - err = f.deleteMeta(p) + err = f.deleteMeta(p, podPassword) if err != nil { return nil, err } @@ -131,7 +139,7 @@ func (f *File) BackupFromFileName(fileNameWithPath string) (*MetaData, error) { p.ModificationTime = time.Now().Unix() // upload PreviousMeta - err = f.uploadMeta(p) + err = f.uploadMeta(p, podPassword) if err != nil { return nil, err } @@ -141,14 +149,49 @@ func (f *File) BackupFromFileName(fileNameWithPath string) (*MetaData, error) { return p, nil } -func (f *File) GetMetaFromFileName(fileNameWithPath string, userAddress utils.Address) (*MetaData, error) { +func (f *File) RenameFromFileName(fileNameWithPath, newFileNameWithPath, podPassword string) (*MetaData, error) { + fileNameWithPath = filepath.ToSlash(fileNameWithPath) + newFileNameWithPath = filepath.ToSlash(newFileNameWithPath) + p, err := f.GetMetaFromFileName(fileNameWithPath, podPassword, f.userAddress) + if err != nil { + return nil, err + } + + // remove old meta and from file map + err = f.deleteMeta(p, podPassword) + if err != nil { + return nil, err + } + f.RemoveFromFileMap(fileNameWithPath) + + newFileName := filepath.Base(newFileNameWithPath) + newPrnt := filepath.ToSlash(filepath.Dir(newFileNameWithPath)) + + // change previous meta.Name + p.Name = newFileName + p.Path = newPrnt + p.ModificationTime = time.Now().Unix() + + // upload meta + err = f.uploadMeta(p, podPassword) + if err != nil { + return nil, err + } + + // add file to map + f.AddToFileMap(newFileNameWithPath, p) + return p, nil +} + +func (f *File) GetMetaFromFileName(fileNameWithPath, podPassword string, userAddress utils.Address) (*MetaData, error) { topic := utils.HashString(fileNameWithPath) - _, metaBytes, err := f.fd.GetFeedData(topic, userAddress) + _, metaBytes, err := f.fd.GetFeedData(topic, userAddress, []byte(podPassword)) if err != nil { return nil, err } if string(metaBytes) == utils.DeletedFeedMagicWord { + f.logger.Errorf("found deleted feed for %s\n", fileNameWithPath) return nil, ErrDeletedFeed } @@ -161,6 +204,6 @@ func (f *File) GetMetaFromFileName(fileNameWithPath string, userAddress utils.Ad return meta, nil } -func (f *File) PutMetaForFile(meta *MetaData) error { - return f.updateMeta(meta) +func (f *File) PutMetaForFile(meta *MetaData, podPassword string) error { + return f.updateMeta(meta, podPassword) } diff --git a/pkg/file/mock/mock_file.go b/pkg/file/mock/mock_file.go index 52c4bfcd..3c8ef336 100644 --- a/pkg/file/mock/mock_file.go +++ b/pkg/file/mock/mock_file.go @@ -29,23 +29,23 @@ func NewMockFile() *File { return &File{} } -func (*File) Upload(_ io.Reader, _ string, _ int64, _ uint32, _, _ string) error { +func (*File) Upload(_ io.Reader, _ string, _ int64, _ uint32, _, _, _ string) error { return nil } -func (*File) Download(_ string) 
(io.ReadCloser, uint64, error) { +func (*File) Download(_, _ string) (io.ReadCloser, uint64, error) { return nil, 0, nil } -func (*File) ListFiles(_ []string) ([]file.Entry, error) { +func (*File) ListFiles(_ []string, _ string) ([]file.Entry, error) { return nil, nil } -func (*File) GetStats(_, _ string) (*file.Stats, error) { +func (*File) GetStats(_, _, _ string) (*file.Stats, error) { return nil, nil } -func (*File) RmFile(_ string) error { +func (*File) RmFile(_, _ string) error { return nil } @@ -57,6 +57,6 @@ func (*File) AddFileToPath(_, _ string) error { return nil } -func (*File) LoadFileMeta(_ string) error { +func (*File) LoadFileMeta(_, _ string) error { return nil } diff --git a/pkg/file/reader.go b/pkg/file/reader.go index e91138c5..de92f539 100644 --- a/pkg/file/reader.go +++ b/pkg/file/reader.go @@ -40,62 +40,72 @@ var ( ) type Reader struct { - readOffset int64 - client blockstore.Client - fileInode INode - fileC chan []byte - lastBlock []byte - fileSize uint64 - blockSize uint32 - blockCursor uint32 - totalSize uint64 - compression string - blockCache *lru.Cache + encryptionPassword string + readOffset int64 + client blockstore.Client + fileInode INode + fileC chan []byte + lastBlock []byte + fileSize uint64 + blockSize uint32 + blockCursor uint32 + totalSize uint64 + compression string + blockCache *lru.Cache rlBuffer []byte rlOffset int rlReadNewLine bool } -// OpenFileForIndex opens file for indexing for documetn db from pod filepath +// OpenFileForIndex opens file for indexing for document db from pod filepath // TODO test // skipcq: TCV-001 -func (f *File) OpenFileForIndex(podFile string) (*Reader, error) { +func (f *File) OpenFileForIndex(podFile, podPassword string) (*Reader, error) { meta := f.GetFromFileMap(podFile) if meta == nil { return nil, fmt.Errorf("file not found in dfs") } - fileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) + encryptedFileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) if err != nil { return nil, err } + + temp := make([]byte, len(encryptedFileInodeBytes)) + copy(temp, encryptedFileInodeBytes) + fileInodeBytes, err := utils.DecryptBytes([]byte(podPassword), temp) + if err != nil { + return nil, err + } + var fileInode INode err = json.Unmarshal(fileInodeBytes, &fileInode) if err != nil { return nil, err } - reader := NewReader(fileInode, f.getClient(), meta.Size, meta.BlockSize, meta.Compression, true) + reader := NewReader(fileInode, f.getClient(), meta.Size, meta.BlockSize, meta.Compression, "encryptionPassword", true) return reader, nil } // NewReader create a new reader object to read a file from the pod based on its configuration. 
-func NewReader(fileInode INode, client blockstore.Client, fileSize uint64, blockSize uint32, compression string, cache bool) *Reader { +func NewReader(fileInode INode, client blockstore.Client, fileSize uint64, blockSize uint32, compression, encryptionPassword string, cache bool) *Reader { var blockCache *lru.Cache if cache { blockCache, _ = lru.New(blockCacheSize) } r := &Reader{ - fileInode: fileInode, - client: client, - fileC: make(chan []byte), - fileSize: fileSize, - blockSize: blockSize, - compression: compression, - blockCache: blockCache, - rlReadNewLine: false, + encryptionPassword: encryptionPassword, + fileInode: fileInode, + client: client, + fileC: make(chan []byte), + fileSize: fileSize, + blockSize: blockSize, + compression: compression, + blockCache: blockCache, + rlReadNewLine: false, } return r } @@ -115,7 +125,6 @@ func (r *Reader) Read(b []byte) (n int, err error) { r.blockCursor += bytesToRead r.readOffset += int64(bytesToRead) bytesRead = int(bytesToRead) - //bytesToRead = 0 if r.blockCursor == r.blockSize { r.lastBlock = nil r.blockCursor = 0 @@ -158,7 +167,7 @@ func (r *Reader) Read(b []byte) (n int, err error) { r.blockSize = uint32(len(r.lastBlock)) } - //if length of bytes to read is greater than block size + // if length of bytes to read is greater than block size if bytesToRead > r.blockSize { bytesToRead = r.blockSize } @@ -306,14 +315,22 @@ func (r *Reader) getBlock(ref []byte, compression string, blockSize uint32) ([]b return data.([]byte), nil } } - stdoutBytes, _, err := r.client.DownloadBlob(ref) + encryptedData, _, err := r.client.DownloadBlob(ref) if err != nil { // skipcq: TCV-001 return nil, err } + + temp := make([]byte, len(encryptedData)) + copy(temp, encryptedData) + stdoutBytes, err := utils.DecryptBytes([]byte(r.encryptionPassword), temp) + if err != nil { + return nil, err + } decompressedData, err := Decompress(stdoutBytes, compression, blockSize) if err != nil { // skipcq: TCV-001 return nil, err } + if r.blockCache != nil { r.blockCache.Add(refStr, decompressedData) } diff --git a/pkg/file/reader_test.go b/pkg/file/reader_test.go index b41c334d..2c6edb4c 100644 --- a/pkg/file/reader_test.go +++ b/pkg/file/reader_test.go @@ -20,11 +20,12 @@ import ( "bytes" "crypto/rand" "errors" - "fmt" "io" "math/big" "testing" + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/file" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" @@ -36,8 +37,9 @@ func TestFileReader(t *testing.T) { t.Run("read-entire-file-shorter-than-block", func(t *testing.T) { fileSize := uint64(15) blockSize := uint32(20) - fileInode := createFile(t, fileSize, blockSize, "", mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode := createFile(t, fileSize, blockSize, "", podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() _, err := reader.Seek(10, 0) if err != nil { @@ -56,8 +58,9 @@ func TestFileReader(t *testing.T) { t.Run("read-entire-file-shorter-than-block-2", func(t *testing.T) { fileSize := uint64(15) blockSize := uint32(20) - fileInode := createFile(t, fileSize, blockSize, "", mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode := createFile(t, 
fileSize, blockSize, "", podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() _, err := reader.Seek(10, 0) if err != nil { @@ -73,8 +76,9 @@ func TestFileReader(t *testing.T) { t.Run("read-entire-file-shorter-than-block-3", func(t *testing.T) { fileSize := uint64(15) blockSize := uint32(20) - fileInode := createFile(t, fileSize, blockSize, "", mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode := createFile(t, fileSize, blockSize, "", podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() _, err := reader.Seek(10, 0) if err != nil { @@ -93,8 +97,10 @@ func TestFileReader(t *testing.T) { t.Run("read-seek", func(t *testing.T) { fileSize := uint64(15) blockSize := uint32(20) - fileInode := createFile(t, fileSize, blockSize, "", mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode := createFile(t, fileSize, blockSize, "", podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() _, err := reader.Seek(16, 0) if !errors.Is(err, file.ErrInvalidOffset) { @@ -105,8 +111,9 @@ func TestFileReader(t *testing.T) { t.Run("read-seek-offset-zero", func(t *testing.T) { fileSize := uint64(15) blockSize := uint32(20) - fileInode := createFile(t, fileSize, blockSize, "", mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode := createFile(t, fileSize, blockSize, "", podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() _, err := reader.Seek(0, 0) if err != nil { @@ -125,11 +132,13 @@ func TestFileReader(t *testing.T) { t.Run("read-entire-file", func(t *testing.T) { fileSize := uint64(100) blockSize := uint32(10) - fileInode := createFile(t, fileSize, blockSize, "", mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode := createFile(t, fileSize, blockSize, "", podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() + outputBytes := readFileContents(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, "") { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -137,11 +146,12 @@ func TestFileReader(t *testing.T) { t.Run("read-file-with-last-block-shorter", func(t *testing.T) { fileSize := uint64(93) blockSize := uint32(10) - fileInode := createFile(t, fileSize, blockSize, "", mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode := createFile(t, fileSize, blockSize, "", podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() outputBytes := readFileContents(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, "") { + if !bytes.Equal(b, 
outputBytes) { t.Fatalf("file contents are not same") } }) @@ -150,11 +160,12 @@ func TestFileReader(t *testing.T) { fileSize := uint64(1638500) blockSize := uint32(163850) compression := "gzip" - fileInode := createFile(t, fileSize, blockSize, compression, mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode := createFile(t, fileSize, blockSize, compression, podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, podpassword, false) defer reader.Close() outputBytes := readFileContents(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, compression) { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -163,11 +174,12 @@ func TestFileReader(t *testing.T) { fileSize := uint64(1999000) blockSize := uint32(200000) compression := "gzip" - fileInode := createFile(t, fileSize, blockSize, compression, mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode := createFile(t, fileSize, blockSize, compression, podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, podpassword, false) defer reader.Close() outputBytes := readFileContents(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, compression) { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -176,11 +188,12 @@ func TestFileReader(t *testing.T) { fileSize := uint64(100) blockSize := uint32(10) compression := "snappy" - fileInode := createFile(t, fileSize, blockSize, compression, mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode := createFile(t, fileSize, blockSize, compression, podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, podpassword, false) defer reader.Close() outputBytes := readFileContents(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, compression) { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -189,11 +202,12 @@ func TestFileReader(t *testing.T) { fileSize := uint64(93) blockSize := uint32(10) compression := "snappy" - fileInode := createFile(t, fileSize, blockSize, compression, mockClient) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode := createFile(t, fileSize, blockSize, compression, podpassword, mockClient) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, podpassword, false) defer reader.Close() outputBytes := readFileContents(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, compression) { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -202,11 +216,12 @@ func TestFileReader(t *testing.T) { fileSize := uint64(100) blockSize := uint32(10) linesPerBlock := uint32(2) - fileInode, _, _, _, _ := createFileWithNewlines(t, fileSize, blockSize, "", mockClient, linesPerBlock) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := 
utils.GetRandString(pod.PodPasswordLength) + b, fileInode, _, _, _, _ := createFileWithNewlines(t, fileSize, blockSize, "", podpassword, mockClient, linesPerBlock) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() outputBytes := readFileContentsUsingReadline(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, "") { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -215,11 +230,12 @@ func TestFileReader(t *testing.T) { fileSize := uint64(97) blockSize := uint32(10) linesPerBlock := uint32(2) - fileInode, _, _, _, _ := createFileWithNewlines(t, fileSize, blockSize, "", mockClient, linesPerBlock) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode, _, _, _, _ := createFileWithNewlines(t, fileSize, blockSize, "", podpassword, mockClient, linesPerBlock) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() outputBytes := readFileContentsUsingReadline(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, "") { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -229,11 +245,12 @@ func TestFileReader(t *testing.T) { blockSize := uint32(10) linesPerBlock := uint32(2) compression := "snappy" - fileInode, _, _, _, _ := createFileWithNewlines(t, fileSize, blockSize, compression, mockClient, linesPerBlock) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + b, fileInode, _, _, _, _ := createFileWithNewlines(t, fileSize, blockSize, compression, podpassword, mockClient, linesPerBlock) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, podpassword, false) defer reader.Close() outputBytes := readFileContentsUsingReadline(t, fileSize, reader) - if !checkFileContents(t, fileInode, outputBytes, mockClient, compression) { + if !bytes.Equal(b, outputBytes) { t.Fatalf("file contents are not same") } }) @@ -242,8 +259,9 @@ func TestFileReader(t *testing.T) { fileSize := uint64(100) blockSize := uint32(10) linesPerBlock := uint32(2) - fileInode, lineStart, line, _, _ := createFileWithNewlines(t, fileSize, blockSize, "", mockClient, linesPerBlock) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode, lineStart, line, _, _ := createFileWithNewlines(t, fileSize, blockSize, "", podpassword, mockClient, linesPerBlock) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() seekN, err := reader.Seek(int64(lineStart), 0) if err != nil { @@ -265,8 +283,9 @@ func TestFileReader(t *testing.T) { fileSize := uint64(100) blockSize := uint32(10) linesPerBlock := uint32(2) - fileInode, _, _, lineStart, line := createFileWithNewlines(t, fileSize, blockSize, "", mockClient, linesPerBlock) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode, _, _, lineStart, line := createFileWithNewlines(t, fileSize, blockSize, "", podpassword, mockClient, linesPerBlock) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, "", podpassword, false) defer reader.Close() 
seekN, err := reader.Seek(int64(lineStart), 0) if err != nil { @@ -289,8 +308,9 @@ func TestFileReader(t *testing.T) { blockSize := uint32(10) linesPerBlock := uint32(2) compression := "snappy" - fileInode, _, _, lineStart, line := createFileWithNewlines(t, fileSize, blockSize, compression, mockClient, linesPerBlock) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, false) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode, _, _, lineStart, line := createFileWithNewlines(t, fileSize, blockSize, compression, podpassword, mockClient, linesPerBlock) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, podpassword, false) defer reader.Close() seekN, err := reader.Seek(int64(lineStart), 0) if err != nil { @@ -313,8 +333,9 @@ func TestFileReader(t *testing.T) { blockSize := uint32(10) linesPerBlock := uint32(2) compression := "snappy" - fileInode, _, _, lineStart, line := createFileWithNewlines(t, fileSize, blockSize, compression, mockClient, linesPerBlock) - reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, true) + podpassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, fileInode, _, _, lineStart, line := createFileWithNewlines(t, fileSize, blockSize, compression, podpassword, mockClient, linesPerBlock) + reader := file.NewReader(fileInode, mockClient, fileSize, blockSize, compression, podpassword, true) defer reader.Close() seekN, err := reader.Seek(int64(lineStart), 0) if err != nil { @@ -350,12 +371,13 @@ func TestFileReader(t *testing.T) { }) } -func createFile(t *testing.T, fileSize uint64, blockSize uint32, compression string, mockClient *mock.BeeClient) file.INode { +func createFile(t *testing.T, fileSize uint64, blockSize uint32, compression, podPassword string, mockClient *mock.BeeClient) ([]byte, file.INode) { var fileBlocks []*file.BlockInfo noOfBlocks := fileSize / uint64(blockSize) if fileSize%uint64(blockSize) != 0 { noOfBlocks += 1 } + content := []byte{} bytesRemaining := fileSize for i := uint64(0); i < noOfBlocks; i++ { bytesToWrite := blockSize @@ -367,6 +389,7 @@ func createFile(t *testing.T, fileSize uint64, blockSize uint32, compression str if err != nil { t.Fatal(err) } + content = append(content, buf...) 
if compression != "" { compressedData, err := file.Compress(buf, compression, bytesToWrite) if err != nil { @@ -374,14 +397,15 @@ func createFile(t *testing.T, fileSize uint64, blockSize uint32, compression str } buf = compressedData } - - addr, err := mockClient.UploadBlob(buf, true, true) + encryptedData, enErr := utils.EncryptBytes([]byte(podPassword), buf) + if enErr != nil { + t.Fatal(err) + } + addr, err := mockClient.UploadBlob(encryptedData, true, true) if err != nil { t.Fatal(err) } - blockName := fmt.Sprintf("block-%05d", i) fileBlock := &file.BlockInfo{ - Name: blockName, Size: bytesToWrite, CompressedSize: uint32(len(buf)), Reference: utils.NewReference(addr), @@ -390,12 +414,12 @@ func createFile(t *testing.T, fileSize uint64, blockSize uint32, compression str bytesRemaining -= uint64(bytesToWrite) } - return file.INode{ + return content, file.INode{ Blocks: fileBlocks, } } -func createFileWithNewlines(t *testing.T, fileSize uint64, blockSize uint32, compression string, mockClient *mock.BeeClient, linesPerBlock uint32) (file.INode, int, []byte, int, []byte) { +func createFileWithNewlines(t *testing.T, fileSize uint64, blockSize uint32, compression, podPassword string, mockClient *mock.BeeClient, linesPerBlock uint32) ([]byte, file.INode, int, []byte, int, []byte) { var fileBlocks []*file.BlockInfo noOfBlocks := fileSize / uint64(blockSize) if fileSize%uint64(blockSize) != 0 { @@ -410,6 +434,8 @@ func createFileWithNewlines(t *testing.T, fileSize uint64, blockSize uint32, com var borderCrossingLine []byte bytesWritten := 0 + content := []byte{} + for i := uint64(0); i < noOfBlocks; i++ { bytesToWrite := blockSize if bytesRemaining < uint64(blockSize) { @@ -490,6 +516,7 @@ func createFileWithNewlines(t *testing.T, fileSize uint64, blockSize uint32, com } } } + content = append(content, buf...) if compression != "" { compressedData, err := file.Compress(buf, compression, bytesToWrite) if err != nil { @@ -497,13 +524,16 @@ func createFileWithNewlines(t *testing.T, fileSize uint64, blockSize uint32, com } buf = compressedData } - addr, err := mockClient.UploadBlob(buf, true, true) + + encryptedData, enErr := utils.EncryptBytes([]byte(podPassword), buf) + if enErr != nil { + t.Fatal(err) + } + addr, err := mockClient.UploadBlob(encryptedData, true, true) if err != nil { t.Fatal(err) } - blockName := fmt.Sprintf("block-%05d", i) fileBlock := &file.BlockInfo{ - Name: blockName, Size: bytesToWrite, CompressedSize: uint32(len(buf)), Reference: utils.NewReference(addr), @@ -512,39 +542,11 @@ func createFileWithNewlines(t *testing.T, fileSize uint64, blockSize uint32, com bytesRemaining -= uint64(bytesToWrite) bytesWritten += int(bytesToWrite) } - return file.INode{ + return content, file.INode{ Blocks: fileBlocks, }, randomLineStartPoint, randomLine, borderCrossingLineStartingPoint, borderCrossingLine } -func checkFileContents(t *testing.T, fileInode file.INode, outputBytes []byte, mockClient *mock.BeeClient, compression string) bool { - var inpBuf []byte - fileSize := uint32(0) - for _, block := range fileInode.Blocks { - buf, _, err := mockClient.DownloadBlob(block.Reference.Bytes()) - if err != nil { - t.Fatal(err) - } - - deflatedBuf, err := file.Decompress(buf, compression, block.Size) - if err != nil { - t.Fatal(err) - } - fileSize += block.Size - inpBuf = append(inpBuf, deflatedBuf...) 
- } - - inputBytes := make([]byte, fileSize) - copy(inputBytes, inpBuf[:fileSize]) - - for i := range inputBytes { - if inputBytes[i] != outputBytes[i] { - fmt.Println(i) - } - } - return bytes.Equal(inputBytes, outputBytes) -} - func readFileContents(t *testing.T, fileSize uint64, reader *file.Reader) []byte { outputBytes := make([]byte, fileSize) count := uint64(0) diff --git a/pkg/file/rename_test.go b/pkg/file/rename_test.go new file mode 100644 index 00000000..66d667df --- /dev/null +++ b/pkg/file/rename_test.go @@ -0,0 +1,174 @@ +package file_test + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + + "github.com/fairdatasociety/fairOS-dfs/pkg/account" + "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" + "github.com/fairdatasociety/fairOS-dfs/pkg/dir" + "github.com/fairdatasociety/fairOS-dfs/pkg/feed" + "github.com/fairdatasociety/fairOS-dfs/pkg/file" + "github.com/fairdatasociety/fairOS-dfs/pkg/logging" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/plexsysio/taskmanager" +) + +func TestRename(t *testing.T) { + mockClient := mock.NewMockBeeClient() + logger := logging.New(io.Discard, 0) + acc := account.New(logger) + _, _, err := acc.CreateUserAccount("") + if err != nil { + t.Fatal(err) + } + pod1AccountInfo, err := acc.CreatePodAccount(1, false) + if err != nil { + t.Fatal(err) + } + fd := feed.New(pod1AccountInfo, mockClient, logger) + user := acc.GetAddress(1) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + t.Run("upload-rename-same-dir-download-small-file", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + filePath := "/dir1" + fileName := "file1" + newFileName := "file_new" + compression := "" + fileSize := int64(100) + blockSize := uint32(10) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + + // file existent check + podFile := utils.CombinePathAndFile(filePath, fileName) + if fileObject.IsFileAlreadyPresent(podFile) { + t.Fatal("file should not be present") + } + _, _, err = fileObject.Download(podFile, podPassword) + if err == nil { + t.Fatal("file should not be present for download") + } + // upload a file + content, err := uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) + if err != nil { + t.Fatal(err) + } + + newPodFile := utils.CombinePathAndFile(filePath, newFileName) + _, err = fileObject.RenameFromFileName(podFile, newPodFile, podPassword) + if err != nil { + t.Fatal(err) + } + + // Download the file and read from reader + present := fileObject.IsFileAlreadyPresent(podFile) + if present { + t.Fatal("old name should not be present") + } + + // Download the file and read from reader + reader, rcvdSize, err := fileObject.Download(utils.CombinePathAndFile(filePath, newFileName), podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + + // validate the result + if len(rcvdBuffer.Bytes()) != len(content) || int(rcvdSize) != len(content) { + t.Fatalf("downloaded content size is invalid") + } + if !bytes.Equal(content, rcvdBuffer.Bytes()) { + t.Fatalf("downloaded content is not equal") + } + + }) + + t.Run("upload-rename-diff-dir-download-small-file", func(t *testing.T) { + filePath := "/dir1" + newFilePath := "/dir2" + fileName := "file1" + compression := "" + fileSize := int64(100) + blockSize := 
uint32(10) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + dirObject := dir.NewDirectory("pod1", mockClient, fd, user, fileObject, tm, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + + // make root dir so that other directories can be added + err = dirObject.MkRootDir("pod1", podPassword, user, fd) + if err != nil { + t.Fatal(err) + } + + // populate the directory with few directory and files + err = dirObject.MkDir(filePath, podPassword) + if err != nil { + t.Fatal(err) + } + err = dirObject.MkDir(newFilePath, podPassword) + if err != nil { + t.Fatal(err) + } + + // file existent check + podFile := utils.CombinePathAndFile(filePath, fileName) + if fileObject.IsFileAlreadyPresent(podFile) { + t.Fatal("file should not be present") + } + + // upload a file + content, err := uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) + if err != nil { + t.Fatal(err) + } + newPodFile := utils.CombinePathAndFile(newFilePath, fileName) + if fileObject.IsFileAlreadyPresent(newPodFile) { + t.Fatal("file should not be present") + } + _, err = fileObject.RenameFromFileName(podFile, newPodFile, podPassword) + if err != nil { + t.Fatal(err) + } + + // Download the file and read from reader + present := fileObject.IsFileAlreadyPresent(podFile) + if present { + t.Fatal("old name should not be present") + } + + present = fileObject.IsFileAlreadyPresent(newPodFile) + if !present { + t.Fatal("new name should be present") + } + // Download the file and read from reader + reader, rcvdSize, err := fileObject.Download(newPodFile, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + + // validate the result + if len(rcvdBuffer.Bytes()) != len(content) || int(rcvdSize) != len(content) { + t.Fatalf("downloaded content size is invalid") + } + if !bytes.Equal(content, rcvdBuffer.Bytes()) { + t.Fatalf("downloaded content is not equal") + } + }) +} diff --git a/pkg/file/rm.go b/pkg/file/rm.go index c2614e81..d02c5cc9 100644 --- a/pkg/file/rm.go +++ b/pkg/file/rm.go @@ -18,6 +18,7 @@ package file import ( "encoding/json" + "errors" "fmt" "net/http" @@ -25,19 +26,24 @@ import ( "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) -// RmFile deletes all the blocks of a file and it related meta data from the Swarm network. -func (f *File) RmFile(podFileWithPath string) error { +// RmFile deletes all the blocks of a file, and it related metadata from the Swarm network. 
+func (f *File) RmFile(podFileWithPath, podPassword string) error { totalFilePath := utils.CombinePathAndFile(podFileWithPath, "") - meta, err := f.GetMetaFromFileName(totalFilePath, f.userAddress) + meta, err := f.GetMetaFromFileName(totalFilePath, podPassword, f.userAddress) + if errors.Is(err, ErrDeletedFeed) { // skipcq: TCV-001 + return nil + } if err != nil { return err } - - fdata, respCode, err := f.client.DownloadBlob(meta.InodeAddress) + encryptedFileInodeBytes, respCode, err := f.client.DownloadBlob(meta.InodeAddress) + if err != nil { // skipcq: TCV-001 + return err + } + fileInodeBytes, err := utils.DecryptBytes([]byte(podPassword), encryptedFileInodeBytes) if err != nil { // skipcq: TCV-001 return err } - if respCode != http.StatusOK { // skipcq: TCV-001 f.logger.Warningf("could not remove blocks in %s", swarm.NewAddress(meta.InodeAddress).String()) return fmt.Errorf("could not remove blocks in %v", swarm.NewAddress(meta.InodeAddress).String()) @@ -45,11 +51,12 @@ func (f *File) RmFile(podFileWithPath string) error { // find the inode and remove the blocks present in the inode one by one var fInode *INode - err = json.Unmarshal(fdata, &fInode) + err = json.Unmarshal(fileInodeBytes, &fInode) if err != nil { // skipcq: TCV-001 f.logger.Warningf("could not unmarshall data in address %s", swarm.NewAddress(meta.InodeAddress).String()) return fmt.Errorf("could not unmarshall data in address %v", swarm.NewAddress(meta.InodeAddress).String()) } + err = f.client.DeleteReference(meta.InodeAddress) if err != nil { f.logger.Errorf("could not delete file inode %s", swarm.NewAddress(meta.InodeAddress).String()) @@ -62,11 +69,10 @@ func (f *File) RmFile(podFileWithPath string) error { return fmt.Errorf("could not delete file inode %v", swarm.NewAddress(fblocks.Reference.Bytes()).String()) } } - // remove the meta topic := utils.HashString(totalFilePath) - _, err = f.fd.UpdateFeed(topic, f.userAddress, []byte(utils.DeletedFeedMagicWord)) // empty byte array will fail, so some 1 byte - if err != nil { // skipcq: TCV-001 + _, err = f.fd.UpdateFeed(topic, f.userAddress, []byte(utils.DeletedFeedMagicWord), []byte(podPassword)) // empty byte array will fail, so some 1 byte + if err != nil { // skipcq: TCV-001 return err } diff --git a/pkg/file/rm_test.go b/pkg/file/rm_test.go index 31389484..9df88ac2 100644 --- a/pkg/file/rm_test.go +++ b/pkg/file/rm_test.go @@ -17,8 +17,14 @@ limitations under the License. 
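With this change a removed file is recorded by overwriting its metadata feed with `utils.DeletedFeedMagicWord`, and `GetMetaFromFileName` reports that as `ErrDeletedFeed`. A sketch of how a caller could assert that after `RmFile`; the helper name is made up, and the fixtures are assumed to be set up as in the tests below.

```
package file_test

import (
	"errors"
	"testing"

	"github.com/fairdatasociety/fairOS-dfs/pkg/file"
	"github.com/fairdatasociety/fairOS-dfs/pkg/utils"
)

// assertRemoved shows what RmFile leaves behind: the metadata feed is
// overwritten with the deleted-feed magic word, so a later lookup reports
// ErrDeletedFeed instead of returning stale metadata.
func assertRemoved(t *testing.T, fileObject *file.File, podFileWithPath, podPassword string, user utils.Address) {
	if err := fileObject.RmFile(podFileWithPath, podPassword); err != nil {
		t.Fatal(err)
	}
	totalPath := utils.CombinePathAndFile(podFileWithPath, "")
	if _, err := fileObject.GetMetaFromFileName(totalPath, podPassword, user); !errors.Is(err, file.ErrDeletedFeed) {
		t.Fatal("expected ErrDeletedFeed after RmFile")
	}
}
```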
package file_test import ( + "context" "io" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/file" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" @@ -33,37 +39,41 @@ func TestRemoveFile(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() t.Run("delete-file", func(t *testing.T) { - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) // remove file2 - err = fileObject.RmFile("/dir1/file2") + err = fileObject.RmFile("/dir1/file2", podPassword) if err == nil { t.Fatal("file not present") } // upload few files - _, err = uploadFile(t, fileObject, "/dir1", "file1", "", 100, 10) + _, err = uploadFile(t, fileObject, "/dir1", "file1", "", podPassword, 100, 10) if err != nil { t.Fatal(err) } - _, err = uploadFile(t, fileObject, "/dir1", "file2", "", 200, 20) + _, err = uploadFile(t, fileObject, "/dir1", "file2", "", podPassword, 200, 20) if err != nil { t.Fatal(err) } // remove file2 - err = fileObject.RmFile("/dir1/file2") + err = fileObject.RmFile("/dir1/file2", podPassword) if err != nil { t.Fatal(err) } @@ -82,24 +92,25 @@ func TestRemoveFile(t *testing.T) { if meta.Name != "file1" { t.Fatalf("retrieved invalid file name") } - err := fileObject.LoadFileMeta(utils.CombinePathAndFile("/dir1", "file1")) + err := fileObject.LoadFileMeta(utils.CombinePathAndFile("/dir1", "file1"), podPassword) if err != nil { t.Fatal("loading deleted file meta should be nil") } }) t.Run("delete-file-in-loop", func(t *testing.T) { - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) for i := 0; i < 80; i++ { // upload file1 - _, err = uploadFile(t, fileObject, "/dir1", "file1", "", 100, 10) + _, err = uploadFile(t, fileObject, "/dir1", "file1", "", podPassword, 100, 10) if err != nil { t.Fatal(err) } // remove file1 - err = fileObject.RmFile("/dir1/file1") + err = fileObject.RmFile("/dir1/file1", podPassword) if err != nil { t.Fatal(err) } diff --git a/pkg/file/stat.go b/pkg/file/stat.go index edeb5889..3234de12 100644 --- a/pkg/file/stat.go +++ b/pkg/file/stat.go @@ -21,42 +21,46 @@ import ( "encoding/json" "fmt" "strconv" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) type Stats struct { - PodName string `json:"pod_name"` - FilePath string `json:"file_path"` - FileName string `json:"file_name"` - FileSize string `json:"file_size"` - BlockSize string `json:"block_size"` - Compression string `json:"compression"` - ContentType string `json:"content_type"` - CreationTime string `json:"creation_time"` - ModificationTime string `json:"modification_time"` - AccessTime string `json:"access_time"` - Blocks []Blocks + PodName string `json:"podName"` + FilePath string `json:"filePath"` + 
FileName string `json:"fileName"` + FileSize string `json:"fileSize"` + BlockSize string `json:"blockSize"` + Compression string `json:"compression"` + ContentType string `json:"contentType"` + CreationTime string `json:"creationTime"` + ModificationTime string `json:"modificationTime"` + AccessTime string `json:"accessTime"` + Blocks []Blocks `json:"blocks"` } type Blocks struct { - Name string `json:"name"` Reference string `json:"reference"` Size string `json:"size"` - CompressedSize string `json:"compressed_size"` + CompressedSize string `json:"compressedSize"` } // GetStats given a filename this function returns all the information about the file // including the block information. -func (f *File) GetStats(podName, podFileWithPath string) (*Stats, error) { +func (f *File) GetStats(podName, podFileWithPath, podPassword string) (*Stats, error) { meta := f.GetFromFileMap(podFileWithPath) if meta == nil { // skipcq: TCV-001 return nil, fmt.Errorf("file not found") } - fileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) + encryptedFileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) + if err != nil { // skipcq: TCV-001 + return nil, err + } + fileInodeBytes, err := utils.DecryptBytes([]byte(podPassword), encryptedFileInodeBytes) if err != nil { // skipcq: TCV-001 return nil, err } - var fileInode INode err = json.Unmarshal(fileInodeBytes, &fileInode) if err != nil { // skipcq: TCV-001 @@ -66,7 +70,6 @@ func (f *File) GetStats(podName, podFileWithPath string) (*Stats, error) { var fileBlocks []Blocks for _, b := range fileInode.Blocks { fb := Blocks{ - Name: b.Name, Reference: hex.EncodeToString(b.Reference.Bytes()), Size: strconv.Itoa(int(b.Size)), CompressedSize: strconv.Itoa(int(b.CompressedSize)), diff --git a/pkg/file/stat_test.go b/pkg/file/stat_test.go index 38c27aaa..28314b97 100644 --- a/pkg/file/stat_test.go +++ b/pkg/file/stat_test.go @@ -17,9 +17,16 @@ limitations under the License. 
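Since the inode blob is now stored encrypted, `GetStats` needs the pod password to decrypt the block list before it can report per-block references and sizes, and `Blocks` no longer carries a `Name`. A small usage sketch with a made-up helper, assuming a file uploaded as in the tests below:

```
package file_test

import (
	"testing"

	"github.com/fairdatasociety/fairOS-dfs/pkg/file"
)

// logStats shows the password-aware stat call: the inode blob is stored
// encrypted, so GetStats needs the pod password to list the blocks.
func logStats(t *testing.T, fileObject *file.File, podPassword string) {
	stats, err := fileObject.GetStats("pod1", "/dir1/file1", podPassword)
	if err != nil {
		t.Fatal(err)
	}
	if stats.FileName != "file1" {
		t.Fatalf("unexpected file name %q", stats.FileName)
	}
	for _, b := range stats.Blocks {
		// blocks no longer carry a Name field; the swarm reference identifies them
		t.Logf("block %s size=%s compressed=%s", b.Reference, b.Size, b.CompressedSize)
	}
}
```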
package file_test import ( + "context" "io" "strconv" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -32,28 +39,32 @@ func TestStat(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) t.Run("stat-file", func(t *testing.T) { - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) // upload a file - _, err = uploadFile(t, fileObject, "/dir1", "file1", "", 100, 10) + _, err = uploadFile(t, fileObject, "/dir1", "file1", "", podPassword, 100, 10) if err != nil { t.Fatal(err) } // stat the file - stats, err := fileObject.GetStats("pod1", "/dir1/file1") + stats, err := fileObject.GetStats("pod1", "/dir1/file1", podPassword) if err != nil { t.Fatal(err) } diff --git a/pkg/file/upload.go b/pkg/file/upload.go index b4757d12..39521f17 100644 --- a/pkg/file/upload.go +++ b/pkg/file/upload.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "net/http" + "path/filepath" "runtime" "sync" "time" @@ -32,20 +33,31 @@ import ( "github.com/klauspost/pgzip" ) +const ( + minBlockSizeForGzip = 164000 +) + var ( noOfParallelWorkers = runtime.NumCPU() + + ErrGzipBlSize = fmt.Errorf("gzip: block size cannot be less than %d", minBlockSizeForGzip) ) // Upload uploads a given blob of bytes as a file in the pod. It also splits the file into number of blocks. the // size of the block is provided during upload. This function also does compression of the blocks gzip/snappy if it is // requested during the upload. 
-func (f *File) Upload(fd io.Reader, podFileName string, fileSize int64, blockSize uint32, podPath, compression string) error { +func (f *File) Upload(fd io.Reader, podFileName string, fileSize int64, blockSize uint32, podPath, compression, podPassword string) error { + podPath = filepath.ToSlash(podPath) + // check compression gzip and blocksize + // pgzip does not allow block size lower or equal to 163840 + // so we set block size lower bound to 164000 for + if compression == "gzip" && blockSize < minBlockSizeForGzip { + return ErrGzipBlSize + } reader := bufio.NewReader(fd) now := time.Now().Unix() meta := MetaData{ Version: MetaVersion, - UserAddress: f.userAddress, - PodName: f.podName, Path: podPath, Name: podFileName, Size: uint64(fileSize), @@ -103,18 +115,17 @@ func (f *File) Upload(fd io.Reader, podFileName string, fileSize int64, blockSiz wg.Add(1) worker <- true go func(counter, size int) { - blockName := fmt.Sprintf("block-%05d", counter) defer func() { <-worker wg.Done() if mainErr != nil { // skipcq: TCV-001 - f.logger.Error("failed uploading block ", blockName) + f.logger.Error("failed uploading block ", counter) return } - f.logger.Info("done uploading block ", blockName) + f.logger.Info("done uploading block ", counter) }() - f.logger.Info("Uploading ", blockName) + f.logger.Infof("Uploading %d block", counter) // compress the data uploadData := data[:size] if compression != "" { @@ -125,13 +136,18 @@ func (f *File) Upload(fd io.Reader, podFileName string, fileSize int64, blockSiz } } - addr, uploadErr := f.client.UploadBlob(uploadData, true, true) + encryptedData, enErr := utils.EncryptBytes([]byte(podPassword), uploadData) + if enErr != nil { + mainErr = enErr + return + } + + addr, uploadErr := f.client.UploadBlob(encryptedData, true, true) if uploadErr != nil { mainErr = uploadErr return } fileBlock := &BlockInfo{ - Name: blockName, Size: uint32(size), CompressedSize: uint32(len(uploadData)), Reference: utils.NewReference(addr), @@ -169,14 +185,17 @@ func (f *File) Upload(fd io.Reader, podFileName string, fileSize int64, blockSiz if err != nil { // skipcq: TCV-001 return err } - - addr, err := f.client.UploadBlob(fileInodeData, true, true) + encryptedFileInodeBytes, err := utils.EncryptBytes([]byte(podPassword), fileInodeData) + if err != nil { // skipcq: TCV-001 + return err + } + addr, err := f.client.UploadBlob(encryptedFileInodeBytes, true, true) if err != nil { // skipcq: TCV-001 return err } meta.InodeAddress = addr - err = f.handleMeta(&meta) + err = f.handleMeta(&meta, podPassword) if err != nil { // skipcq: TCV-001 return err } diff --git a/pkg/file/upload_test.go b/pkg/file/upload_test.go index 560772f2..1052dd2c 100644 --- a/pkg/file/upload_test.go +++ b/pkg/file/upload_test.go @@ -17,11 +17,17 @@ limitations under the License. 
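Two behavioural changes in `Upload` stand out: each block and the inode are encrypted with the pod password via `utils.EncryptBytes` before upload, and gzip compression is rejected for block sizes below 164000 bytes because pgzip cannot handle smaller blocks. A sketch of the new guard, using a made-up helper around the real API:

```
package file_test

import (
	"bytes"
	"errors"
	"testing"

	"github.com/fairdatasociety/fairOS-dfs/pkg/file"
)

// assertGzipBlockSizeGuard shows the new gzip lower bound: block sizes below
// 164000 are rejected with ErrGzipBlSize before any data is uploaded.
func assertGzipBlockSizeGuard(t *testing.T, fileObject *file.File, podPassword string) {
	content := make([]byte, 200000)
	err := fileObject.Upload(bytes.NewReader(content), "big.bin", int64(len(content)),
		10, "/dir1", "gzip", podPassword)
	if !errors.Is(err, file.ErrGzipBlSize) {
		t.Fatal("expected ErrGzipBlSize for a too-small gzip block size")
	}
	// at or above the minimum the upload proceeds as usual
	if err := fileObject.Upload(bytes.NewReader(content), "big.bin", int64(len(content)),
		164000, "/dir1", "gzip", podPassword); err != nil {
		t.Fatal(err)
	}
}
```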
package file_test import ( + "bytes" + "context" "crypto/rand" + "errors" "io" - "io/ioutil" "os" + "path/filepath" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -29,30 +35,37 @@ import ( "github.com/fairdatasociety/fairOS-dfs/pkg/file" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/plexsysio/taskmanager" ) func TestUpload(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - pod1AccountInfo, err := acc.CreatePodAccount(1, "password", false) + pod1AccountInfo, err := acc.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd := feed.New(pod1AccountInfo, mockClient, logger) user := acc.GetAddress(1) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() t.Run("upload-small-file", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + filePath := "/dir1" fileName := "file1" compression := "" fileSize := int64(100) blockSize := uint32(10) - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) - _, err = uploadFile(t, fileObject, filePath, fileName, compression, fileSize, blockSize) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + _, err = uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) if err != nil { t.Fatal(err) } @@ -77,16 +90,16 @@ func TestUpload(t *testing.T) { t.Fatalf("invalid block size in meta") } - err := fileObject.LoadFileMeta(filePath + "/" + fileName) + err := fileObject.LoadFileMeta(filePath+"/"+fileName, podPassword) if err != nil { t.Fatal(err) } - err = fileObject.LoadFileMeta(filePath + "/asd" + fileName) + err = fileObject.LoadFileMeta(filePath+"/asd"+fileName, podPassword) if err == nil { t.Fatal("local file meta should fail") } - meat2, err := fileObject.BackupFromFileName(filePath + "/" + fileName) + meat2, err := fileObject.BackupFromFileName(filePath+"/"+fileName, podPassword) if err != nil { t.Fatal(err) } @@ -96,25 +109,27 @@ func TestUpload(t *testing.T) { }) t.Run("upload-small-file-at-root", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + filePath := string(os.PathSeparator) fileName := "file1" compression := "" fileSize := int64(100) blockSize := uint32(10) - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) - _, err = uploadFile(t, fileObject, filePath, fileName, compression, fileSize, blockSize) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + _, err = uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) if err != nil { t.Fatal(err) } // check for meta - meta := fileObject.GetFromFileMap(utils.CombinePathAndFile(filePath, fileName)) + meta := fileObject.GetFromFileMap(utils.CombinePathAndFile(filepath.ToSlash(filePath), fileName)) if meta == nil { t.Fatalf("file not added in file map") } // validate meta items - if meta.Path != filePath { + if meta.Path != filepath.ToSlash(filePath) { t.Fatalf("invalid path in meta") } if meta.Name != fileName { @@ -129,25 +144,27 @@ func TestUpload(t *testing.T) { }) t.Run("upload-small-file-at-root-with-blank-filename", func(t *testing.T) 
{ + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + filePath := string(os.PathSeparator) fileName := "file1" compression := "" fileSize := int64(100) blockSize := uint32(10) - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) - _, err = uploadFile(t, fileObject, filePath, fileName, compression, fileSize, blockSize) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + _, err = uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) if err != nil { t.Fatal(err) } // check for meta - meta := fileObject.GetFromFileMap(utils.CombinePathAndFile(filePath+fileName, "")) + meta := fileObject.GetFromFileMap(filepath.ToSlash(utils.CombinePathAndFile(filePath+fileName, ""))) if meta == nil { t.Fatalf("file not added in file map") } // validate meta items - if meta.Path != filePath { + if meta.Path != filepath.ToSlash(filePath) { t.Fatalf("invalid path in meta") } if meta.Name != fileName { @@ -162,25 +179,26 @@ func TestUpload(t *testing.T) { }) t.Run("upload-small-file-at-root-with-prefix", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) filePath := string(os.PathSeparator) fileName := "file1" compression := "" fileSize := int64(100) blockSize := uint32(10) - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) - _, err = uploadFile(t, fileObject, filePath, fileName, compression, fileSize, blockSize) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + _, err = uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) if err != nil { t.Fatal(err) } // check for meta - meta := fileObject.GetFromFileMap(utils.CombinePathAndFile(filePath, string(os.PathSeparator)+fileName)) + meta := fileObject.GetFromFileMap(utils.CombinePathAndFile(filepath.ToSlash(filePath), filepath.ToSlash(string(os.PathSeparator)+fileName))) if meta == nil { t.Fatalf("file not added in file map") } // validate meta items - if meta.Path != filePath { + if meta.Path != filepath.ToSlash(filePath) { t.Fatalf("invalid path in meta") } if meta.Name != fileName { @@ -202,25 +220,26 @@ func TestUpload(t *testing.T) { }) t.Run("upload-small-file-at-root-with-prefix-snappy", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) filePath := string(os.PathSeparator) fileName := "file2" compression := "snappy" fileSize := int64(100) blockSize := uint32(10) - fileObject := file.NewFile("pod1", mockClient, fd, user, logger) - _, err = uploadFile(t, fileObject, filePath, fileName, compression, fileSize, blockSize) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + _, err = uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) if err != nil { t.Fatal(err) } // check for meta - meta := fileObject.GetFromFileMap(utils.CombinePathAndFile(filePath, string(os.PathSeparator)+fileName)) + meta := fileObject.GetFromFileMap(utils.CombinePathAndFile(filepath.ToSlash(filePath), filepath.ToSlash(string(os.PathSeparator)+fileName))) if meta == nil { t.Fatalf("file not added in file map") } // validate meta items - if meta.Path != filePath { + if meta.Path != filepath.ToSlash(filePath) { t.Fatalf("invalid path in meta") } if meta.Name != fileName { @@ -240,11 +259,67 @@ func TestUpload(t *testing.T) { t.Fatal("meta2 should be nil") } }) + + t.Run("upload-small-file-at-root-with-prefix-gzip", func(t *testing.T) { + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + filePath 
:= string(os.PathSeparator) + fileName := "file2" + compression := "gzip" + fileSize := int64(100) + blockSize := uint32(164000) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + + _, err = uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, uint32(163999)) + if !errors.Is(file.ErrGzipBlSize, err) { + t.Fatal("should provide higher block size") + } + + _, err = uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) + if err != nil { + t.Fatal(err) + } + + // check for meta + fp := utils.CombinePathAndFile(filepath.ToSlash(filePath), filepath.ToSlash(string(os.PathSeparator)+fileName)) + meta := fileObject.GetFromFileMap(fp) + if meta == nil { + t.Fatalf("file not added in file map") + } + + // validate meta items + if meta.Path != filepath.ToSlash(filePath) { + t.Fatalf("invalid path in meta") + } + if meta.Name != fileName { + t.Fatalf("invalid file name in meta") + } + if meta.Size != uint64(fileSize) { + t.Fatalf("invalid file size in meta") + } + if meta.BlockSize != blockSize { + t.Fatalf("invalid block size in meta") + } + reader, _, err := fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + fileObject.RemoveAllFromFileMap() + + meta2 := fileObject.GetFromFileMap(fp) + if meta2 != nil { + t.Fatal("meta2 should be nil") + } + }) } -func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compression string, fileSize int64, blockSize uint32) ([]byte, error) { +func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compression, podPassword string, fileSize int64, blockSize uint32) ([]byte, error) { // create a temp file - fd, err := ioutil.TempFile("", fileName) + fd, err := os.CreateTemp("", fileName) if err != nil { t.Fatal(err) } @@ -274,5 +349,5 @@ func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compres } // upload the temp file - return content, fileObject.Upload(f1, fileName, fileSize, blockSize, filePath, compression) + return content, fileObject.Upload(f1, fileName, fileSize, blockSize, filePath, compression, podPassword) } diff --git a/pkg/file/writeAt.go b/pkg/file/writeAt.go new file mode 100644 index 00000000..51c14581 --- /dev/null +++ b/pkg/file/writeAt.go @@ -0,0 +1,269 @@ +package file + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "sync" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" +) + +// WriteAt writes a file from a given offset +func (f *File) WriteAt(podFileWithPath, podPassword string, update io.Reader, offset uint64, truncate bool) (int, error) { + // check file is present + totalFilePath := utils.CombinePathAndFile(podFileWithPath, "") + if !f.IsFileAlreadyPresent(totalFilePath) { + return 0, ErrFileNotPresent + } + + // get file meta + meta := f.GetFromFileMap(totalFilePath) + if meta == nil { // skipcq: TCV-001 + return 0, ErrFileNotFound + } + + // download file inode (blocks info) + encryptedFileInodeBytes, _, err := f.getClient().DownloadBlob(meta.InodeAddress) + if err != nil { // skipcq: TCV-001 + return 0, err + } + fileInodeBytes, err := utils.DecryptBytes([]byte(podPassword), encryptedFileInodeBytes) + if err != nil { // skipcq: TCV-001 + return 0, err + } + var fileInode INode + err = json.Unmarshal(fileInodeBytes, &fileInode) + if err != nil { // skipcq: TCV-001 + return 0, err + } + + // create file reader + fd := NewReader(fileInode, 
f.getClient(), meta.Size, meta.BlockSize, meta.Compression, podPassword, false) + reader := &bytes.Buffer{} + _, err = reader.ReadFrom(fd) + if err != nil { + return 0, err + } + + // prepare updater + updater := &bytes.Buffer{} + _, err = updater.ReadFrom(update) + if err != nil { + return 0, err + } + + // get file size + dataSize := uint64(reader.Len()) + + // updater size + updaterSize := uint64(updater.Len()) + + if offset > dataSize { + return 0, fmt.Errorf("wrong offset") + } + + newDataSize := dataSize + if truncate { + newDataSize = updaterSize + } + endofst := offset + updaterSize + if endofst > dataSize { + newDataSize = endofst + } + startingBlock := offset / uint64(meta.BlockSize) + readStartPoint := startingBlock * uint64(meta.BlockSize) + reader.Next(int(readStartPoint)) + blockOffset := offset - readStartPoint + var totalLength = readStartPoint + i := startingBlock + errC := make(chan error) + doneC := make(chan bool) + worker := make(chan bool, noOfParallelWorkers) + var wg sync.WaitGroup + + refMap := map[int]*BlockInfo{} + for k, v := range fileInode.Blocks { + refMap[k] = v + } + + refMapMu := sync.RWMutex{} + var contentBytes []byte + wg.Add(1) + go func() { + var mainErr error + for { + if !(totalLength < newDataSize && updater.Len() != 0) { + wg.Done() + break + } + if mainErr != nil { // skipcq: TCV-001 + errC <- mainErr + wg.Done() + return + } + data := []byte{} + n := 0 + var err error + if totalLength < offset { + temp := make([]byte, blockOffset) + n, err = reader.Read(temp) + if err != nil { + if err == io.EOF { + if totalLength < meta.Size { // skipcq: TCV-001 + errC <- fmt.Errorf("invalid file length of file data received") + return + } + wg.Done() + break + } + errC <- err // skipcq: TCV-001 + return + } + data = append(data, temp[:n]...) + totalLength += uint64(n) + } + if totalLength >= offset && totalLength < endofst && uint32(len(data)) != meta.BlockSize { + temp := make([]byte, meta.BlockSize-uint32(n)) + n, err = updater.Read(temp) + if err != nil { + if err == io.EOF { + if totalLength < meta.Size { // skipcq: TCV-001 + errC <- fmt.Errorf("invalid file length of file data received") + return + } + wg.Done() + break + } + errC <- err // skipcq: TCV-001 + return + } + data = append(data, temp[:n]...) + totalLength += uint64(n) + if reader.Len() > 0 { + reader.Next(n) + } + } + + if uint32(len(data)) != meta.BlockSize && !truncate { + if totalLength < dataSize && uint32(len(data)) != meta.BlockSize { + temp := make([]byte, meta.BlockSize-uint32(len(data))) + n, err = reader.Read(temp) + if err != nil { + if err == io.EOF { + if totalLength < meta.Size { // skipcq: TCV-001 + errC <- fmt.Errorf("invalid file length of file data received") + return + } + wg.Done() + break + } + errC <- err // skipcq: TCV-001 + return + } + data = append(data, temp...) + totalLength += uint64(n) + } + } + // determine the content type from the first 512 bytes of the file + if len(contentBytes) < 512 { + contentBytes = append(contentBytes, data[:n]...) 
+ if len(contentBytes) >= 512 { // skipcq: TCV-001 + cBytes := bytes.NewReader(contentBytes[:512]) + cReader := bufio.NewReader(cBytes) + meta.ContentType = f.getContentType(cReader) + } + } + + wg.Add(1) + worker <- true + go func(counter, size int) { + defer func() { + <-worker + wg.Done() + if mainErr != nil { // skipcq: TCV-001 + return + } + }() + f.logger.Infof("Uploading %d block", counter) + // compress the data + uploadData := data + if meta.Compression != "" { + uploadData, err = compress(data, meta.Compression, meta.BlockSize) + if err != nil { // skipcq: TCV-001 + mainErr = err + return + } + } + + encryptedData, enErr := utils.EncryptBytes([]byte(podPassword), uploadData) + if enErr != nil { + mainErr = enErr + return + } + + addr, uploadErr := f.client.UploadBlob(encryptedData, true, true) + if uploadErr != nil { + mainErr = uploadErr + return + } + + fileBlock := &BlockInfo{ + Size: uint32(size), + CompressedSize: uint32(len(uploadData)), + Reference: utils.NewReference(addr), + } + + refMapMu.Lock() + defer refMapMu.Unlock() + refMap[counter] = fileBlock + }(int(i), n) + + i++ + } + }() + + go func() { + wg.Wait() + close(doneC) + }() + select { + case <-doneC: + break + case err := <-errC: // skipcq: TCV-001 + close(errC) + return 0, err + } + + // copy the block references to the fileInode + fileInode.Blocks = []*BlockInfo{} + for j := 0; j < len(refMap); j++ { + fileInode.Blocks = append(fileInode.Blocks, refMap[j]) + if truncate && i == uint64(j) { + break + } + } + fileInodeData, err := json.Marshal(fileInode) + if err != nil { // skipcq: TCV-001 + return 0, err + } + encryptedFileInodeBytes, err = utils.EncryptBytes([]byte(podPassword), fileInodeData) + if err != nil { // skipcq: TCV-001 + return 0, err + } + addr, err := f.client.UploadBlob(encryptedFileInodeBytes, true, true) + if err != nil { // skipcq: TCV-001 + return 0, err + } + + meta.InodeAddress = addr + meta.Size = newDataSize + err = f.handleMeta(meta, podPassword) + if err != nil { // skipcq: TCV-001 + return 0, err + } + f.AddToFileMap(utils.CombinePathAndFile(meta.Path, meta.Name), meta) + return int(updaterSize), nil +} diff --git a/pkg/file/writeAt_test.go b/pkg/file/writeAt_test.go new file mode 100644 index 00000000..1c850083 --- /dev/null +++ b/pkg/file/writeAt_test.go @@ -0,0 +1,481 @@ +package file_test + +import ( + "bytes" + "context" + "errors" + "io" + "math/rand" + "os" + "path/filepath" + "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + + "github.com/fairdatasociety/fairOS-dfs/pkg/account" + "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" + "github.com/fairdatasociety/fairOS-dfs/pkg/feed" + "github.com/fairdatasociety/fairOS-dfs/pkg/file" + "github.com/fairdatasociety/fairOS-dfs/pkg/logging" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/plexsysio/taskmanager" +) + +func TestWriteAt(t *testing.T) { + mockClient := mock.NewMockBeeClient() + logger := logging.New(io.Discard, 0) + acc := account.New(logger) + _, _, err := acc.CreateUserAccount("") + if err != nil { + t.Fatal(err) + } + pod1AccountInfo, err := acc.CreatePodAccount(1, false) + if err != nil { + t.Fatal(err) + } + fd := feed.New(pod1AccountInfo, mockClient, logger) + user := acc.GetAddress(1) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + + t.Run("writeAt-non-existent-file", func(t *testing.T) { + filePath := string(os.PathSeparator) + 
fileName := "file1" + + var offset uint64 = 3 + + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + + fp := utils.CombinePathAndFile(filepath.ToSlash(filePath+fileName), "") + + update := []byte("123") + rewrite := &bytes.Buffer{} + rewrite.Write(update) + _, err = fileObject.WriteAt(fp, podPassword, rewrite, offset, false) + if !errors.Is(file.ErrFileNotPresent, err) { + t.Fatal("file should not be present") + } + }) + + t.Run("upload-update-known-very-small-file", func(t *testing.T) { + filePath := string(os.PathSeparator) + fileName := "file1" + compression := "" + blockSize := uint32(10) + var offset uint64 = 3 + + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + dt, err := uploadFileKnownContent(t, fileObject, filePath, fileName, compression, podPassword, blockSize) + if err != nil { + t.Fatal(err) + } + + fp := utils.CombinePathAndFile(filepath.ToSlash(filePath+fileName), "") + // check for meta + meta := fileObject.GetFromFileMap(fp) + if meta == nil { + t.Fatalf("file not added in file map") + } + + // validate meta items + if meta.Path != filepath.ToSlash(filePath) { + t.Fatalf("invalid path in meta") + } + if meta.Name != fileName { + t.Fatalf("invalid file name in meta") + } + if meta.Size != uint64(len(dt)) { + t.Fatalf("invalid file size in meta") + } + if meta.BlockSize != blockSize { + t.Fatalf("invalid block size in meta") + } + reader, _, err := fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + reader.Close() + reader2, _, err := fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer2 := new(bytes.Buffer) + _, err = rcvdBuffer2.ReadFrom(reader2) + if err != nil { + t.Fatal(err) + } + reader, _, err = fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + + rcvdBuffer3 := new(bytes.Buffer) + _, err = rcvdBuffer3.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + + update := []byte("123") + rewrite := &bytes.Buffer{} + rewrite.Write(update) + _, err = fileObject.WriteAt(fp, podPassword, rewrite, offset, false) + if err != nil { + t.Fatal(err) + } + reader, _, err = fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer = new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + updatedContent := append(dt[:offset], update...) + + if uint64(len(update))+offset < uint64(len(dt)) { + updatedContent = append(updatedContent, dt[uint64(len(update))+offset:]...) 
+ } + + if !bytes.Equal(updatedContent, rcvdBuffer.Bytes()) { + t.Fatal("content is different") + } + + fileObject.RemoveAllFromFileMap() + + meta2 := fileObject.GetFromFileMap(fp) + if meta2 != nil { + t.Fatal("meta2 should be nil") + } + }) + + t.Run("upload-update-truncate-known-very-small-file", func(t *testing.T) { + filePath := string(os.PathSeparator) + fileName := "file1" + compression := "" + blockSize := uint32(20) + var offset uint64 = 0 + + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + dt, err := uploadFileKnownContent(t, fileObject, filePath, fileName, compression, podPassword, blockSize) + if err != nil { + t.Fatal(err) + } + + // check for meta + fp := utils.CombinePathAndFile(filepath.ToSlash(filePath+fileName), "") + meta := fileObject.GetFromFileMap(fp) + if meta == nil { + t.Fatalf("file not added in file map") + } + + // validate meta items + if meta.Path != filepath.ToSlash(filePath) { + t.Fatalf("invalid path in meta") + } + if meta.Name != fileName { + t.Fatalf("invalid file name in meta") + } + if meta.Size != uint64(len(dt)) { + t.Fatalf("invalid file size in meta") + } + if meta.BlockSize != blockSize { + t.Fatalf("invalid block size in meta") + } + + reader, _, err := fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + + update := []byte("abcdefg 12345") + rewrite := &bytes.Buffer{} + rewrite.Write(update) + _, err = fileObject.WriteAt(fp, podPassword, rewrite, offset, true) + if err != nil { + t.Fatal(err) + } + + reader, _, err = fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer = new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + updatedContent := append(dt[:offset], update...) 
+ + if !bytes.Equal(updatedContent, rcvdBuffer.Bytes()) { + t.Fatal("content is different") + } + + fileObject.RemoveAllFromFileMap() + + meta2 := fileObject.GetFromFileMap(fp) + if meta2 != nil { + t.Fatal("meta2 should be nil") + } + }) + + t.Run("upload-update-small-file", func(t *testing.T) { + filePath := "/dir1" + fileName := "file1" + compression := "" + fileSize := int64(100) + blockSize := uint32(10) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + dt, err := uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) + if err != nil { + t.Fatal(err) + } + + // check for meta + fp := utils.CombinePathAndFile(filepath.ToSlash(filePath), fileName) + meta := fileObject.GetFromFileMap(fp) + if meta == nil { + t.Fatalf("file not added in file map") + } + + // validate meta items + if meta.Path != filepath.ToSlash(filePath) { + t.Fatalf("invalid path in meta") + } + if meta.Name != fileName { + t.Fatalf("invalid file name in meta") + } + if meta.Size != uint64(fileSize) { + t.Fatalf("invalid file size in meta") + } + if meta.BlockSize != blockSize { + t.Fatalf("invalid block size in meta") + } + + err = fileObject.LoadFileMeta(filePath+"/"+fileName, podPassword) + if err != nil { + t.Fatal(err) + } + + rand.Seed(time.Now().UnixNano()) + min := 0 + max := int(fileSize) + offset := rand.Intn((max - min + 1) + min) + content, err := utils.GetRandBytes(offset) + if err != nil { + t.Fatal(err) + } + r := bytes.NewReader(content) + n, err := fileObject.WriteAt(fp, podPassword, r, uint64(offset), false) + if n != offset { + t.Fatalf("Failed to update %d bytes", offset-n) + } + if err != nil { + t.Fatal(err) + } + reader, _, err := fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + updatedContent := append(dt[:offset], content...) + + if uint64(len(content)+offset) < uint64(len(dt)) { + updatedContent = append(updatedContent, dt[uint64(len(content)+offset):]...) 
+ } + + if !bytes.Equal(updatedContent, rcvdBuffer.Bytes()) { + t.Fatal("content is different") + } + fileObject.RemoveAllFromFileMap() + + meta2 := fileObject.GetFromFileMap(fp) + if meta2 != nil { + t.Fatal("meta2 should be nil") + } + }) + + t.Run("upload-update-small-file-at-root-with-prefix-snappy", func(t *testing.T) { + filePath := string(os.PathSeparator) + fileName := "file2" + compression := "snappy" + fileSize := int64(100) + blockSize := uint32(10) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + dt, err := uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) + if err != nil { + t.Fatal(err) + } + + // check for meta + fp := utils.CombinePathAndFile(filepath.ToSlash(filePath), fileName) + meta := fileObject.GetFromFileMap(fp) + if meta == nil { + t.Fatalf("file not added in file map") + } + + // validate meta items + if meta.Path != filepath.ToSlash(filePath) { + t.Fatalf("invalid path in meta") + } + if meta.Name != fileName { + t.Fatalf("invalid file name in meta") + } + if meta.Size != uint64(fileSize) { + t.Fatalf("invalid file size in meta") + } + if meta.BlockSize != blockSize { + t.Fatalf("invalid block size in meta") + } + + rand.Seed(time.Now().UnixNano()) + min := 0 + max := int(fileSize) + offset := rand.Intn((max - min + 1) + min) + content, err := utils.GetRandBytes(offset) + if err != nil { + t.Fatal(err) + } + r := bytes.NewReader(content) + n, err := fileObject.WriteAt(fp, podPassword, r, uint64(offset), false) + if n != offset { + t.Fatalf("Failed to update %d bytes", offset-n) + } + if err != nil { + t.Fatal(err) + } + reader, _, err := fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + updatedContent := append(dt[:offset], content...) + + if uint64(len(content)+offset) < uint64(len(dt)) { + updatedContent = append(updatedContent, dt[uint64(len(content)+offset):]...) 
+ } + + if !bytes.Equal(updatedContent, rcvdBuffer.Bytes()) { + t.Fatal("content is different") + } + + fileObject.RemoveAllFromFileMap() + + meta2 := fileObject.GetFromFileMap(fp) + if meta2 != nil { + t.Fatal("meta2 should be nil") + } + }) + + t.Run("upload-update-small-file-at-root-with-prefix-gzip", func(t *testing.T) { + filePath := "/dir1" + fileName := "file10" + compression := "gzip" + fileSize := int64(100) + blockSize := uint32(164000) + fileObject := file.NewFile("pod1", mockClient, fd, user, tm, logger) + dt, err := uploadFile(t, fileObject, filePath, fileName, compression, podPassword, fileSize, blockSize) + if err != nil { + t.Fatal(err) + } + err = fileObject.LoadFileMeta(filePath+"/"+fileName, podPassword) + if err != nil { + t.Fatal(err) + } + + // check for meta + fp := utils.CombinePathAndFile(filepath.ToSlash(filePath), fileName) + meta := fileObject.GetFromFileMap(fp) + if meta == nil { + t.Fatalf("file not added in file map") + } + + // validate meta items + if meta.Path != filepath.ToSlash(filePath) { + t.Fatalf("invalid path in meta") + } + if meta.Name != fileName { + t.Fatalf("invalid file name in meta") + } + if meta.Size != uint64(fileSize) { + t.Fatalf("invalid file size in meta") + } + if meta.BlockSize != blockSize { + t.Fatalf("invalid block size in meta") + } + + rand.Seed(time.Now().UnixNano()) + min := 0 + max := int(fileSize) + offset := rand.Intn((max - min + 1) + min) + content, err := utils.GetRandBytes(offset) + if err != nil { + t.Fatal(err) + } + r := bytes.NewReader(content) + _, err = fileObject.WriteAt(fp, podPassword, r, uint64(offset), false) + if err != nil { + t.Fatal(err) + } + reader, n1, err := fileObject.Download(fp, podPassword) + if err != nil { + t.Fatal(err) + } + rcvdBuffer := new(bytes.Buffer) + _, err = rcvdBuffer.ReadFrom(reader) + if err != nil { + t.Fatal(err) + } + updatedContent := append(dt[:offset], content...) + + if uint64(len(content)+offset) < uint64(len(dt)) { + updatedContent = append(updatedContent, dt[uint64(len(content)+offset):]...) + } + + if !bytes.Equal(updatedContent, rcvdBuffer.Bytes()[:n1]) { + t.Log("updatedContent", updatedContent) + t.Log("downloadedContent", rcvdBuffer.Bytes()) + t.Fatal("content is different ") + } + + fileObject.RemoveAllFromFileMap() + + meta2 := fileObject.GetFromFileMap(fp) + if meta2 != nil { + t.Fatal("meta2 should be nil") + } + }) +} + +func uploadFileKnownContent(t *testing.T, fileObject *file.File, filePath, fileName, compression, podPassword string, blockSize uint32) ([]byte, error) { + f1 := &bytes.Buffer{} + content := []byte("abcdefghijk abcdefghijk abcdefghijk") + _, err := f1.Write(content) + if err != nil { + t.Fatal(err) + } + // upload the temp file + return content, fileObject.Upload(f1, fileName, int64(len(content)), blockSize, filePath, compression, podPassword) +} diff --git a/pkg/pod/close.go b/pkg/pod/close.go index 69bf539a..2a9077d1 100644 --- a/pkg/pod/close.go +++ b/pkg/pod/close.go @@ -23,7 +23,7 @@ func (p *Pod) ClosePod(podName string) error { return ErrPodNotOpened } - podInfo, err := p.GetPodInfoFromPodMap(podName) + podInfo, _, err := p.GetPodInfoFromPodMap(podName) if err != nil { // skipcq: TCV-001 return err } diff --git a/pkg/pod/close_test.go b/pkg/pod/close_test.go index 9afb2fb9..fb2269a7 100644 --- a/pkg/pod/close_test.go +++ b/pkg/pod/close_test.go @@ -17,8 +17,14 @@ limitations under the License. 
package pod_test import ( + "context" "io" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -31,33 +37,38 @@ func TestClose(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) podName1 := "test1" t.Run("close-pod", func(t *testing.T) { // create a pod - info, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("pod1", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("pod1", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod1, podName1) + addFilesAndDirectories(t, info, pod1, podName1, podPassword) // verify if the pod is closed - gotPodInfo, err := pod1.GetPodInfoFromPodMap(podName1) + gotPodInfo, _, err := pod1.GetPodInfoFromPodMap(podName1) if err == nil { t.Fatalf("pod not closed") } diff --git a/pkg/pod/del.go b/pkg/pod/del.go index 1f59a259..64824edb 100644 --- a/pkg/pod/del.go +++ b/pkg/pod/del.go @@ -18,20 +18,19 @@ package pod import ( "fmt" - "strings" ) // DeleteOwnPod removed a pod and the list of pods belonging to a user. func (p *Pod) DeleteOwnPod(podName string) error { - pods, sharedPods, err := p.loadUserPods() + podList, err := p.loadUserPods() if err != nil { // skipcq: TCV-001 return err } found := false var podIndex int - for index, pod := range pods { - if strings.Trim(pod, "\n") == podName { - delete(pods, index) + for index, pod := range podList.Pods { + if pod.Name == podName { + podList.Pods = append(podList.Pods[:index], podList.Pods[index+1:]...) podIndex = index found = true } @@ -41,17 +40,17 @@ func (p *Pod) DeleteOwnPod(podName string) error { } // delete tables - podInfo, err := p.GetPodInfoFromPodMap(podName) + podInfo, _, err := p.GetPodInfoFromPodMap(podName) if err != nil { return err } - err = podInfo.GetDocStore().DeleteAllDocumentDBs() + err = podInfo.GetDocStore().DeleteAllDocumentDBs(podInfo.GetPodPassword()) if err != nil { return err } - err = podInfo.GetKVStore().DeleteAllKVTables() + err = podInfo.GetKVStore().DeleteAllKVTables(podInfo.GetPodPassword()) if err != nil { return err } @@ -60,26 +59,21 @@ func (p *Pod) DeleteOwnPod(podName string) error { p.removePodFromPodMap(podName) p.acc.DeletePodAccount(podIndex) - // if last pod is deleted.. something should be there to update the feed - if pods == nil { - pods = make(map[int]string) - pods[0] = "" - } - // remove the pod finally - return p.storeUserPods(pods, sharedPods) + return p.storeUserPods(podList) } // DeleteSharedPod removed a pod and the list of pods shared by other users. 
func (p *Pod) DeleteSharedPod(podName string) error { - pods, sharedPods, err := p.loadUserPods() + podList, err := p.loadUserPods() if err != nil { // skipcq: TCV-001 return err } + found := false - for index, pod := range sharedPods { - if strings.Trim(pod, "\n") == podName { - delete(sharedPods, index) + for index, pod := range podList.SharedPods { + if pod.Name == podName { + podList.SharedPods = append(podList.SharedPods[:index], podList.SharedPods[index+1:]...) found = true } } @@ -90,11 +84,6 @@ func (p *Pod) DeleteSharedPod(podName string) error { // remove it from other data structures p.removePodFromPodMap(podName) - // if last sharedPods is deleted.. something should be there to update the feed - if sharedPods == nil { - sharedPods = make(map[string]string) - } - // remove the pod finally - return p.storeUserPods(pods, sharedPods) + return p.storeUserPods(podList) } diff --git a/pkg/pod/del_test.go b/pkg/pod/del_test.go index b2aa2417..f313a6a8 100644 --- a/pkg/pod/del_test.go +++ b/pkg/pod/del_test.go @@ -17,10 +17,16 @@ limitations under the License. package pod_test import ( + "context" "io" "sort" "strings" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -35,18 +41,23 @@ func TestDelete(t *testing.T) { logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) podName1 := "test1" podName2 := "test2" t.Run("create-one-pod-and-del", func(t *testing.T) { - _, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } @@ -79,7 +90,7 @@ func TestDelete(t *testing.T) { t.Fatalf("delete failed") } - infoGot, err := pod1.GetPodInfoFromPodMap(podName1) + infoGot, _, err := pod1.GetPodInfoFromPodMap(podName1) if err == nil { t.Fatalf("pod not deleted from map") } @@ -89,11 +100,12 @@ func TestDelete(t *testing.T) { }) t.Run("create-two-pod-and-del", func(t *testing.T) { - _, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } - _, err = pod1.CreatePod(podName2, "password", "") + _, err = pod1.CreatePod(podName2, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } @@ -130,7 +142,7 @@ func TestDelete(t *testing.T) { t.Fatalf("delete pod failed") } - infoGot, err := pod1.GetPodInfoFromPodMap(podName1) + infoGot, _, err := pod1.GetPodInfoFromPodMap(podName1) if err == nil { t.Fatalf("pod not deleted from map") } @@ -138,7 +150,7 @@ func TestDelete(t *testing.T) { t.Fatalf("pod not deleted from map") } - _, err = pod1.GetPodInfoFromPodMap(podName2) + _, _, err = pod1.GetPodInfoFromPodMap(podName2) if err != nil { t.Fatalf("removed wrong pod") } @@ -148,18 +160,19 @@ func TestDelete(t *testing.T) { t.Run("create-pod-and-del-with-tables", func(t 
*testing.T) { podName := "delPod" for i := 0; i < 10; i++ { - pi, err := pod1.CreatePod(podName, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + pi, err := pod1.CreatePod(podName, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName) } - dbTables, err := pi.GetDocStore().LoadDocumentDBSchemas() + dbTables, err := pi.GetDocStore().LoadDocumentDBSchemas(podPassword) if err != nil { t.Fatalf("err doc list %s", podName) } if len(dbTables) != 0 { t.Fatal("doc tables delete failed while pod delete") } - kvTables, err := pi.GetKVStore().LoadKVTables() + kvTables, err := pi.GetKVStore().LoadKVTables(podPassword) if err != nil { t.Fatalf("err kv list %s", podName) } @@ -169,24 +182,24 @@ func TestDelete(t *testing.T) { si := make(map[string]collection.IndexType) si["first_name"] = collection.StringIndex si["age"] = collection.NumberIndex - err = pi.GetDocStore().CreateDocumentDB("dbName", si, true) + err = pi.GetDocStore().CreateDocumentDB("dbName", podPassword, si, true) if err != nil { t.Fatal(err) } - err = pi.GetKVStore().CreateKVTable("kvName", collection.StringIndex) + err = pi.GetKVStore().CreateKVTable("kvName", podPassword, collection.StringIndex) if err != nil { t.Fatal(err) } - dbTables, err = pi.GetDocStore().LoadDocumentDBSchemas() + dbTables, err = pi.GetDocStore().LoadDocumentDBSchemas(podPassword) if err != nil { t.Fatalf("err doc list %s", podName) } if len(dbTables) != 1 { t.Fatal("doc tables create failed while pod delete") } - kvTables, err = pi.GetKVStore().LoadKVTables() + kvTables, err = pi.GetKVStore().LoadKVTables(podPassword) if err != nil { t.Fatalf("err kv list %s", podName) } diff --git a/pkg/pod/info.go b/pkg/pod/info.go index f2178988..fb93c98a 100644 --- a/pkg/pod/info.go +++ b/pkg/pod/info.go @@ -28,6 +28,7 @@ import ( type Info struct { podName string + podPassword string userAddress utils.Address dir *di.Directory file *f.File @@ -45,6 +46,10 @@ func (i *Info) GetPodAddress() utils.Address { return i.userAddress } +func (i *Info) GetPodPassword() string { + return i.podPassword +} + func (i *Info) GetDirectory() *di.Directory { return i.dir } @@ -61,11 +66,13 @@ func (i *Info) GetFeed() *feed.API { return i.feed } +// GetKVStore returns kvStore // skipcq: TCV-001 func (i *Info) GetKVStore() *collection.KeyValue { return i.kvStore } +// GetDocStore returns docStore // skipcq: TCV-001 func (i *Info) GetDocStore() *collection.Document { return i.docStore diff --git a/pkg/pod/ls.go b/pkg/pod/ls.go index e8b5a13b..8c927c53 100644 --- a/pkg/pod/ls.go +++ b/pkg/pod/ls.go @@ -18,20 +18,25 @@ package pod // ListPods List all the available pods belonging to a user. func (p *Pod) ListPods() ([]string, []string, error) { - pods, sharedPods, err := p.loadUserPods() + podList, err := p.loadUserPods() if err != nil { // skipcq: TCV-001 return nil, nil, err } var listPods []string - for _, pod := range pods { - listPods = append(listPods, pod) + for _, pod := range podList.Pods { + listPods = append(listPods, pod.Name) } var listSharedPods []string - for _, pod := range sharedPods { - listSharedPods = append(listSharedPods, pod) + for _, pod := range podList.SharedPods { + listSharedPods = append(listSharedPods, pod.Name) } return listPods, listSharedPods, nil } + +// PodList List all the available pods belonging to a user in json format. 
+func (p *Pod) PodList() (*PodList, error) { + return p.loadUserPods() +} diff --git a/pkg/pod/ls_test.go b/pkg/pod/ls_test.go index 838e2a35..850d3d86 100644 --- a/pkg/pod/ls_test.go +++ b/pkg/pod/ls_test.go @@ -17,8 +17,13 @@ limitations under the License. package pod import ( - "io" + "os" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -28,12 +33,13 @@ import ( func TestPod_ListPods(t *testing.T) { mockClient := mock.NewMockBeeClient() - logger := logging.New(io.Discard, 0) + logger := logging.New(os.Stdout, 0) acc := account.New(logger) accountInfo := acc.GetUserAccountInfo() fd := feed.New(accountInfo, mockClient, logger) - pod1 := NewPod(mockClient, fd, acc, logger) - _, _, err := acc.CreateUserAccount("password", "") + tm := taskmanager.New(1, 10, time.Second*15, logger) + pod1 := NewPod(mockClient, fd, acc, tm, logger) + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } @@ -49,11 +55,12 @@ func TestPod_ListPods(t *testing.T) { }) t.Run("create-two-pods", func(t *testing.T) { - _, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(PodPasswordLength) + _, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod: %v", err) } - _, err = pod1.CreatePod(podName2, "password", "") + _, err = pod1.CreatePod(podName2, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } diff --git a/pkg/pod/max_pod_test.go b/pkg/pod/max_pod_test.go index 90be34a3..c8cb7d9d 100644 --- a/pkg/pod/max_pod_test.go +++ b/pkg/pod/max_pod_test.go @@ -4,6 +4,9 @@ import ( "errors" "io" "testing" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -17,30 +20,36 @@ func TestMaxPods(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + tm := taskmanager.New(1, 10, time.Second*15, logger) + + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) t.Run("create-max-pods", func(t *testing.T) { - maxPodId := 140 + // t.SkipNow() + + maxPodId := 30 for i := 1; i <= maxPodId; i++ { - name, err := utils.GetRandString(25) + name, err := utils.GetRandString(utils.MaxPodNameLength) if err != nil { t.Fatal(err) } - _, err = pod1.CreatePod(name, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, err = pod1.CreatePod(name, "", podPassword) if err != nil { t.Fatalf("error creating pod %s with index %d: %s", name, i, err) } } - name, err := utils.GetRandString(25) + name, err := utils.GetRandString(utils.MaxPodNameLength) if err != nil { t.Fatal(err) } - _, err = pod1.CreatePod(name, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, err = pod1.CreatePod(name, "", podPassword) if !errors.Is(err, pod.ErrMaximumPodLimit) { t.Fatalf("maximum pod limit should have been reached") } diff --git a/pkg/pod/new.go b/pkg/pod/new.go index 4608dc16..0913c1f5 100644 --- a/pkg/pod/new.go +++ b/pkg/pod/new.go @@ -17,12 +17,10 @@ limitations under the License. 
package pod import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" + "encoding/hex" + "encoding/json" + + "github.com/ethereum/go-ethereum/crypto" "github.com/fairdatasociety/fairOS-dfs/pkg/account" c "github.com/fairdatasociety/fairOS-dfs/pkg/collection" @@ -37,28 +35,37 @@ const ( ) // CreatePod creates a new pod for a given user. -func (p *Pod) CreatePod(podName, passPhrase, addressString string) (*Info, error) { - podName, err := cleanPodName(podName) +func (p *Pod) CreatePod(podName, addressString, podPassword string) (*Info, error) { + podName, err := CleanPodName(podName) if err != nil { return nil, err } // check if pods is present and get free index - pods, sharedPods, err := p.loadUserPods() + podList, err := p.loadUserPods() if err != nil { // skipcq: TCV-001 return nil, err } + pods := map[int]string{} + sharedPods := map[string]string{} + for _, pod := range podList.Pods { + pods[pod.Index] = pod.Name + } + + for _, pod := range podList.SharedPods { + sharedPods[pod.Address] = pod.Name + } var accountInfo *account.Info var fd *feed.API var file *f.File var dir *d.Directory var user utils.Address if addressString != "" { - if p.checkIfPodPresent(pods, podName) { + if p.checkIfPodPresent(podList, podName) { return nil, ErrPodAlreadyExists } - if p.checkIfSharedPodPresent(sharedPods, podName) { + if p.checkIfSharedPodPresent(podList, podName) { return nil, ErrPodAlreadyExists } @@ -68,12 +75,17 @@ func (p *Pod) CreatePod(podName, passPhrase, addressString string) (*Info, error accountInfo.SetAddress(address) fd = feed.New(accountInfo, p.client, p.logger) - file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.logger) - dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.logger) + file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.tm, p.logger) + dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.tm, p.logger) // store the pod file with shared pod - sharedPods[addressString] = podName - err = p.storeUserPods(pods, sharedPods) + sharedPod := &SharedPodListItem{ + Name: podName, + Address: addressString, + Password: podPassword, + } + podList.SharedPods = append(podList.SharedPods, *sharedPod) + err = p.storeUserPods(podList) if err != nil { // skipcq: TCV-001 return nil, err } @@ -83,10 +95,10 @@ func (p *Pod) CreatePod(podName, passPhrase, addressString string) (*Info, error } else { // your own pod, so create a new account with private key - if p.checkIfPodPresent(pods, podName) { + if p.checkIfPodPresent(podList, podName) { return nil, ErrPodAlreadyExists } - if p.checkIfSharedPodPresent(sharedPods, podName) { + if p.checkIfSharedPodPresent(podList, podName) { return nil, ErrPodAlreadyExists } @@ -96,22 +108,27 @@ func (p *Pod) CreatePod(podName, passPhrase, addressString string) (*Info, error } // create a child account for the userAddress and other data structures for the pod - accountInfo, err = p.acc.CreatePodAccount(freeId, passPhrase, true) + accountInfo, err = p.acc.CreatePodAccount(freeId, true) if err != nil { // skipcq: TCV-001 return nil, err } fd = feed.New(accountInfo, p.client, p.logger) - file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.logger) - dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.logger) + file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.tm, p.logger) + dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.tm, p.logger) // store the pod file pods[freeId] = podName 
- err = p.storeUserPods(pods, sharedPods) + pod := &PodListItem{ + Name: podName, + Index: freeId, + Password: podPassword, + } + podList.Pods = append(podList.Pods, *pod) + err = p.storeUserPods(podList) if err != nil { // skipcq: TCV-001 return nil, err } - user = p.acc.GetAddress(freeId) } @@ -121,6 +138,7 @@ func (p *Pod) CreatePod(podName, passPhrase, addressString string) (*Info, error // create the pod info and store it in the podMap podInfo := &Info{ podName: podName, + podPassword: podPassword, userAddress: user, dir: dir, file: file, @@ -133,66 +151,45 @@ func (p *Pod) CreatePod(podName, passPhrase, addressString string) (*Info, error return podInfo, nil } -func (p *Pod) loadUserPods() (map[int]string, map[string]string, error) { +func (p *Pod) loadUserPods() (*PodList, error) { // The userAddress pod file topic should be in the name of the userAddress account topic := utils.HashString(podFile) - _, data, err := p.fd.GetFeedData(topic, p.acc.GetAddress(account.UserAccountIndex)) + privKeyBytes := crypto.FromECDSA(p.acc.GetUserAccountInfo().GetPrivateKey()) + _, data, err := p.fd.GetFeedData(topic, p.acc.GetAddress(account.UserAccountIndex), []byte(hex.EncodeToString(privKeyBytes))) if err != nil { // skipcq: TCV-001 if err.Error() != "feed does not exist or was not updated yet" { - return nil, nil, err + return nil, err } } + podList := &PodList{ + Pods: []PodListItem{}, + SharedPods: []SharedPodListItem{}, + } + if len(data) == 0 { + return podList, nil + } - buf := bytes.NewBuffer(data) - rd := bufio.NewReader(buf) - pods := make(map[int]string) - sharedPods := make(map[string]string) - for { - line, err := rd.ReadString('\n') - if err == io.EOF { - break - } - if err != nil { // skipcq: TCV-001 - return nil, nil, fmt.Errorf("loading pods: %w", err) - } - line = strings.Trim(line, "\n") - lines := strings.Split(line, ",") - index, err := strconv.ParseInt(lines[1], 10, 64) - p.logger.Debug(line) - if err != nil { - sharedPods[lines[1]] = lines[0] - continue - } - pods[int(index)] = lines[0] + err = json.Unmarshal(data, podList) + if err != nil { // skipcq: TCV-001 + return nil, err } - return pods, sharedPods, nil + + return podList, nil } -func (p *Pod) storeUserPods(pods map[int]string, sharedPods map[string]string) error { - buf := bytes.NewBuffer(nil) - podLen := len(pods) - for index, pod := range pods { - pod := strings.Trim(pod, "\n") - if podLen > 1 && pod == "" { // skipcq: TCV-001 - continue - } - line := fmt.Sprintf("%s,%d", pod, index) - buf.WriteString(line + "\n") +func (p *Pod) storeUserPods(podList *PodList) error { + data, err := json.Marshal(podList) + if err != nil { + return err } - for addr, pod := range sharedPods { - pod := strings.Trim(pod, "\n") - if podLen > 1 && pod == "" { - continue - } - line := fmt.Sprintf("%s,%s", pod, addr) - buf.WriteString(line + "\n") - } - if len(buf.Bytes()) > utils.MaxChunkLength { + if len(data) > utils.MaxChunkLength { return ErrMaximumPodLimit } topic := utils.HashString(podFile) - _, err := p.fd.UpdateFeed(topic, p.acc.GetAddress(account.UserAccountIndex), buf.Bytes()) + + privKeyBytes := crypto.FromECDSA(p.acc.GetUserAccountInfo().GetPrivateKey()) + _, err = p.fd.UpdateFeed(topic, p.acc.GetAddress(account.UserAccountIndex), data, []byte(hex.EncodeToString(privKeyBytes))) if err != nil { // skipcq: TCV-001 return err } @@ -212,35 +209,35 @@ func (*Pod) getFreeId(pods map[int]string) (int, error) { return 0, ErrMaxPodsReached // skipcq: TCV-001 } -func (*Pod) checkIfPodPresent(pods map[int]string, podName string) bool { 
- for _, pod := range pods { - if strings.Trim(pod, "\n") == podName { +func (*Pod) checkIfPodPresent(pods *PodList, podName string) bool { + for _, pod := range pods.Pods { + if pod.Name == podName { return true } } return false } -func (*Pod) checkIfSharedPodPresent(sharedPods map[string]string, podName string) bool { - for _, pod := range sharedPods { - if strings.Trim(pod, "\n") == podName { +func (*Pod) checkIfSharedPodPresent(pods *PodList, podName string) bool { + for _, pod := range pods.SharedPods { + if pod.Name == podName { return true } } return false } -func (p *Pod) getPodIndex(podName string) (int, error) { - pods, _, err := p.loadUserPods() +func (p *Pod) getPodIndex(podName string) (podIndex int, err error) { + podList, err := p.loadUserPods() if err != nil { return -1, err } // skipcq: TCV-001 - podIndex := -1 - for index, pod := range pods { - if strings.Trim(pod, "\n") == podName { - delete(pods, index) - podIndex = index + podIndex = -1 + for _, pod := range podList.Pods { + if pod.Name == podName { + podIndex = pod.Index + return } } - return podIndex, nil + return } diff --git a/pkg/pod/new_test.go b/pkg/pod/new_test.go index 6ee3e057..5beedc33 100644 --- a/pkg/pod/new_test.go +++ b/pkg/pod/new_test.go @@ -17,10 +17,14 @@ limitations under the License. package pod_test import ( + "context" "errors" - "io" + "os" "strings" "testing" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" @@ -33,14 +37,18 @@ import ( func TestNew(t *testing.T) { mockClient := mock.NewMockBeeClient() - logger := logging.New(io.Discard, 0) + logger := logging.New(os.Stdout, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) podName1 := "test1" podName2 := "test2" @@ -55,7 +63,8 @@ func TestNew(t *testing.T) { if err != nil { t.Fatalf("error creating pod %s", podName1) } - _, err = pod1.CreatePod(randomLongPOdName, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + _, err = pod1.CreatePod(randomLongPOdName, "", podPassword) if !errors.Is(err, pod.ErrTooLongPodName) { t.Fatalf("error creating pod %s", podName1) } @@ -63,9 +72,9 @@ func TestNew(t *testing.T) { if pod1Present { t.Fatal("pod1 should not be present") } - info, err := pod1.CreatePod(podName1, "password", "") + info, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { - t.Fatalf("error creating pod %s", podName1) + t.Fatalf("error creating pod %s: %s", podName1, err.Error()) } if pod1.GetFeed() == nil || pod1.GetAccount() == nil { @@ -89,7 +98,7 @@ func TestNew(t *testing.T) { t.Fatalf("podName is not %s", podName1) } - infoGot, err := pod1.GetPodInfoFromPodMap(podName1) + infoGot, _, err := pod1.GetPodInfoFromPodMap(podName1) if err != nil { t.Fatalf("could not get pod from podMap") } @@ -100,7 +109,8 @@ func TestNew(t *testing.T) { }) t.Run("create-second-pod", func(t *testing.T) { - info, err := pod1.CreatePod(podName2, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName2, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName2) } @@ -122,7 +132,7 @@ func TestNew(t *testing.T) { 
t.Fatalf("podName is not %s", podName2) } - infoGot, err := pod1.GetPodInfoFromPodMap(podName2) + infoGot, _, err := pod1.GetPodInfoFromPodMap(podName2) if err != nil { t.Fatalf("could not get pod from podMap") } diff --git a/pkg/pod/open.go b/pkg/pod/open.go index 48c1ba39..0a29ce17 100644 --- a/pkg/pod/open.go +++ b/pkg/pod/open.go @@ -17,14 +17,13 @@ limitations under the License. package pod import ( + "context" "fmt" - "strings" - - "github.com/fairdatasociety/fairOS-dfs/pkg/feed" "github.com/fairdatasociety/fairOS-dfs/pkg/account" c "github.com/fairdatasociety/fairOS-dfs/pkg/collection" d "github.com/fairdatasociety/fairOS-dfs/pkg/dir" + "github.com/fairdatasociety/fairOS-dfs/pkg/feed" f "github.com/fairdatasociety/fairOS-dfs/pkg/file" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" ) @@ -32,29 +31,31 @@ import ( // OpenPod opens a pod if it is not already opened. as part of opening the pod // it loads all the data structures related to the pod. Also it syncs all the // files and directories under this pod from the Swarm network. -func (p *Pod) OpenPod(podName, passPhrase string) (*Info, error) { +func (p *Pod) OpenPod(podName string) (*Info, error) { // check if pods is present and get the index of the pod - pods, sharedPods, err := p.loadUserPods() + podList, err := p.loadUserPods() if err != nil { // skipcq: TCV-001 return nil, err } - sharedPodType := false - if !p.checkIfPodPresent(pods, podName) { - if !p.checkIfSharedPodPresent(sharedPods, podName) { + if !p.checkIfPodPresent(podList, podName) { + if !p.checkIfSharedPodPresent(podList, podName) { return nil, ErrInvalidPodName } else { sharedPodType = true } } - - var accountInfo *account.Info - var file *f.File - var fd *feed.API - var dir *d.Directory - var user utils.Address + var ( + podPassword string + accountInfo *account.Info + file *f.File + fd *feed.API + dir *d.Directory + user utils.Address + ) if sharedPodType { - addressString := p.getAddress(sharedPods, podName) + var addressString string + addressString, podPassword = p.getAddressPassword(podList, podName) if addressString == "" { // skipcq: TCV-001 return nil, fmt.Errorf("shared pod does not exist") } @@ -64,26 +65,27 @@ func (p *Pod) OpenPod(podName, passPhrase string) (*Info, error) { accountInfo.SetAddress(address) fd = feed.New(accountInfo, p.client, p.logger) - file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.logger) - dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.logger) + file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.tm, p.logger) + dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.tm, p.logger) // set the userAddress as the pod address we got from shared pod user = address } else { - index := p.getIndex(pods, podName) + var index int + index, podPassword = p.getIndexPassword(podList, podName) if index == -1 { return nil, fmt.Errorf("pod does not exist") } // Create pod account and other data structures // create a child account for the userAddress and other data structures for the pod - accountInfo, err = p.acc.CreatePodAccount(index, passPhrase, false) + accountInfo, err = p.acc.CreatePodAccount(index, false) if err != nil { // skipcq: TCV-001 return nil, err } fd = feed.New(accountInfo, p.client, p.logger) - file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.logger) - dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.logger) + file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.tm, 
p.logger) + dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.tm, p.logger) user = p.acc.GetAddress(index) } @@ -94,6 +96,7 @@ func (p *Pod) OpenPod(podName, passPhrase string) (*Info, error) { // create the pod info and store it in the podMap podInfo := &Info{ podName: podName, + podPassword: podPassword, userAddress: user, accountInfo: accountInfo, feed: fd, @@ -113,20 +116,109 @@ func (p *Pod) OpenPod(podName, passPhrase string) (*Info, error) { return podInfo, nil } -func (*Pod) getIndex(pods map[int]string, podName string) int { - for index, pod := range pods { - if strings.Trim(pod, "\n") == podName { - return index +// OpenPodAsync opens a pod if it is not already opened. as part of opening the pod +// it loads all the data structures related to the pod. Also it syncs all the +// files and directories under this pod from the Swarm network. +func (p *Pod) OpenPodAsync(ctx context.Context, podName string) (*Info, error) { + // check if pods is present and get the index of the pod + podList, err := p.loadUserPods() + if err != nil { // skipcq: TCV-001 + return nil, err + } + + sharedPodType := false + if !p.checkIfPodPresent(podList, podName) { + if !p.checkIfSharedPodPresent(podList, podName) { + return nil, ErrInvalidPodName + } else { + sharedPodType = true + } + } + + var ( + podPassword string + accountInfo *account.Info + file *f.File + fd *feed.API + dir *d.Directory + user utils.Address + ) + if sharedPodType { + var addressString string + addressString, podPassword = p.getAddressPassword(podList, podName) + if addressString == "" { // skipcq: TCV-001 + return nil, fmt.Errorf("shared pod does not exist") + } + + accountInfo = p.acc.GetEmptyAccountInfo() + address := utils.HexToAddress(addressString) + accountInfo.SetAddress(address) + + fd = feed.New(accountInfo, p.client, p.logger) + file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.tm, p.logger) + dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.tm, p.logger) + + // set the userAddress as the pod address we got from shared pod + user = address + } else { + var index int + index, podPassword = p.getIndexPassword(podList, podName) + if index == -1 { + return nil, fmt.Errorf("pod does not exist") + } + // Create pod account and other data structures + // create a child account for the userAddress and other data structures for the pod + accountInfo, err = p.acc.CreatePodAccount(index, false) + if err != nil { // skipcq: TCV-001 + return nil, err + } + + fd = feed.New(accountInfo, p.client, p.logger) + file = f.NewFile(podName, p.client, fd, accountInfo.GetAddress(), p.tm, p.logger) + dir = d.NewDirectory(podName, p.client, fd, accountInfo.GetAddress(), file, p.tm, p.logger) + + user = p.acc.GetAddress(index) + } + + kvStore := c.NewKeyValueStore(podName, fd, accountInfo, user, p.client, p.logger) + docStore := c.NewDocumentStore(podName, fd, accountInfo, user, file, p.client, p.logger) + + // create the pod info and store it in the podMap + podInfo := &Info{ + podName: podName, + podPassword: podPassword, + userAddress: user, + accountInfo: accountInfo, + feed: fd, + dir: dir, + file: file, + kvStore: kvStore, + docStore: docStore, + } + + p.addPodToPodMap(podName, podInfo) + // sync the pod's files and directories + err = p.SyncPodAsync(ctx, podName) + if err != nil && err != d.ErrResourceDeleted { // skipcq: TCV-001 + return nil, err + } + return podInfo, nil +} + +func (*Pod) getIndexPassword(podList *PodList, podName string) (int, string) { + for _, 
pod := range podList.Pods { + if pod.Name == podName { + return pod.Index, pod.Password } } - return -1 // skipcq: TCV-001 + return -1, "" // skipcq: TCV-001 } -func (*Pod) getAddress(sharedPods map[string]string, podName string) string { - for address, pod := range sharedPods { - if strings.Trim(pod, "\n") == podName { - return address +func (*Pod) getAddressPassword(podList *PodList, podName string) (string, string) { + for _, pod := range podList.SharedPods { + if pod.Name == podName { + return pod.Address, pod.Password } } - return "" + return "", "" } diff --git a/pkg/pod/open_test.go b/pkg/pod/open_test.go index a9a6992e..3d5fd243 100644 --- a/pkg/pod/open_test.go +++ b/pkg/pod/open_test.go @@ -17,12 +17,17 @@ limitations under the License. package pod_test import ( + "context" "crypto/rand" "errors" "io" - "io/ioutil" "os" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -36,39 +41,44 @@ func TestOpen(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) podName1 := "test1" + podName2 := "test2" t.Run("open-pod", func(t *testing.T) { - // open non existent the pod - _, err := pod1.OpenPod(podName1, "password") + // open non-existent the pod + _, err := pod1.OpenPod(podName1) if !errors.Is(err, pod.ErrInvalidPodName) { t.Fatal("pod should not be present") } // create a pod - info, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("pod1", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("pod1", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod1, podName1) + addFilesAndDirectories(t, info, pod1, podName1, podPassword) // open the pod - podInfo, err := pod1.OpenPod(podName1, "password") + podInfo, err := pod1.OpenPod(podName1) if err != nil { t.Fatal(err) } @@ -77,7 +87,7 @@ func TestOpen(t *testing.T) { if podInfo == nil { t.Fatalf("pod not opened") } - gotPodInfo, err := pod1.GetPodInfoFromPodMap(podName1) + gotPodInfo, _, err := pod1.GetPodInfoFromPodMap(podName1) if err != nil { t.Fatalf("pod not opened") } @@ -88,11 +98,56 @@ func TestOpen(t *testing.T) { t.Fatalf("invalid pod name") } }) + + t.Run("open-pod-async", func(t *testing.T) { + // open non-existent the pod + _, err := pod1.OpenPod(podName2) + if !errors.Is(err, pod.ErrInvalidPodName) { + t.Fatal("pod should not be present") + } + + // create a pod + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName2, "", podPassword) + if err != nil { + t.Fatalf("error creating pod %s", podName1) + } + + // make root dir so that other directories can be 
added + err = info.GetDirectory().MkRootDir("pod1", podPassword, info.GetPodAddress(), info.GetFeed()) + if err != nil { + t.Fatal(err) + } + + // create some dir and files + addFilesAndDirectories(t, info, pod1, podName2, podPassword) + + // open the pod + podInfo, err := pod1.OpenPodAsync(context.Background(), podName2) + if err != nil { + t.Fatal(err) + } + + // validate if properly opened + if podInfo == nil { + t.Fatalf("pod not opened") + } + gotPodInfo, _, err := pod1.GetPodInfoFromPodMap(podName2) + if err != nil { + t.Fatalf("pod not opened") + } + if gotPodInfo == nil { + t.Fatalf("pod not opened") + } + if gotPodInfo.GetPodName() != podName2 { + t.Fatalf("invalid pod name") + } + }) } -func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compression string, fileSize int64, blockSize uint32) ([]byte, error) { +func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compression, podPassword string, fileSize int64, blockSize uint32) ([]byte, error) { // create a temp file - fd, err := ioutil.TempFile("", fileName) + fd, err := os.CreateTemp("", fileName) if err != nil { t.Fatal(err) } @@ -122,13 +177,13 @@ func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compres } // upload the temp file - return content, fileObject.Upload(f1, fileName, fileSize, blockSize, filePath, compression) + return content, fileObject.Upload(f1, fileName, fileSize, blockSize, filePath, compression, podPassword) } -func addFilesAndDirectories(t *testing.T, info *pod.Info, pod1 *pod.Pod, podName1 string) { +func addFilesAndDirectories(t *testing.T, info *pod.Info, pod1 *pod.Pod, podName1, podPassword string) { t.Helper() dirObject := info.GetDirectory() - err := dirObject.MkDir("/parentDir") + err := dirObject.MkDir("/parentDir", podPassword) if err != nil { t.Fatal(err) } @@ -142,28 +197,28 @@ func addFilesAndDirectories(t *testing.T, info *pod.Info, pod1 *pod.Pod, podName } // populate the directory with few directory and files - err = dirObject.MkDir("/parentDir/subDir1") + err = dirObject.MkDir("/parentDir/subDir1", podPassword) if err != nil { t.Fatal(err) } - err = dirObject.MkDir("/parentDir/subDir2") + err = dirObject.MkDir("/parentDir/subDir2", podPassword) if err != nil { t.Fatal(err) } fileObject := info.GetFile() - _, err = uploadFile(t, fileObject, "/parentDir", "file1", "", 100, 10) + _, err = uploadFile(t, fileObject, "/parentDir", "file1", "", podPassword, 100, 10) if err != nil { t.Fatal(err) } - err = dirObject.AddEntryToDir("/parentDir", "file1", true) + err = dirObject.AddEntryToDir("/parentDir", podPassword, "file1", true) if err != nil { t.Fatal(err) } - _, err = uploadFile(t, fileObject, "/parentDir", "file2", "", 200, 20) + _, err = uploadFile(t, fileObject, "/parentDir", "file2", "", podPassword, 200, 20) if err != nil { t.Fatal(err) } - err = dirObject.AddEntryToDir("/parentDir", "file2", true) + err = dirObject.AddEntryToDir("/parentDir", podPassword, "file2", true) if err != nil { t.Fatal(err) } diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go index ae1c457c..4df6eb4e 100644 --- a/pkg/pod/pod.go +++ b/pkg/pod/pod.go @@ -21,14 +21,16 @@ import ( "sync" "github.com/fairdatasociety/fairOS-dfs/pkg/account" + "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" "github.com/fairdatasociety/fairOS-dfs/pkg/feed" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" - - "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" + "github.com/fairdatasociety/fairOS-dfs/pkg/taskmanager" ) const ( maxPodId = 65535 + + PodPasswordLength = 32 ) 
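For orientation, a minimal sketch of the new calling convention (not part of the patch): every pod is now created with its own random password of `PodPasswordLength` characters, and `NewPod` takes a task manager. The sketch mirrors the updated tests in this diff; the client, feed, account and logger values are assumed to come from the usual fairOS-dfs setup, and the assumed imports are `context`, `time`, `github.com/plexsysio/taskmanager` and the fairOS-dfs `pod` and `utils` packages.

```
func createPodExample(client blockstore.Client, fd *feed.API, acc *account.Account, logger logging.Logger) error {
	tm := taskmanager.New(1, 10, time.Second*15, logger)
	defer func() { _ = tm.Stop(context.Background()) }()

	p := pod.NewPod(client, fd, acc, tm, logger)

	// each pod gets its own random password; callers generate it up front
	podPassword, err := utils.GetRandString(pod.PodPasswordLength)
	if err != nil {
		return err
	}

	// the second argument is the shared-pod address and stays empty for a fresh pod
	_, err = p.CreatePod("test1", "", podPassword)
	return err
}
```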
type Pod struct { @@ -38,10 +40,29 @@ type Pod struct { podMap map[string]*Info // podName -> dir podMu *sync.RWMutex logger logging.Logger + tm taskmanager.TaskManagerGO +} + +type PodListItem struct { + Name string `json:"name"` + Index int `json:"index"` + Password string `json:"password"` +} + +type SharedPodListItem struct { + Name string `json:"name"` + Address string `json:"address"` + Password string `json:"password"` +} + +type PodList struct { + Pods []PodListItem `json:"pods"` + SharedPods []SharedPodListItem `json:"sharedPods"` } // NewPod creates the main pod object which has all the methods related to the pods. -func NewPod(client blockstore.Client, feed *feed.API, account *account.Account, logger logging.Logger) *Pod { +func NewPod(client blockstore.Client, feed *feed.API, account *account.Account, + m taskmanager.TaskManagerGO, logger logging.Logger) *Pod { return &Pod{ fd: feed, acc: account, @@ -49,6 +70,7 @@ func NewPod(client blockstore.Client, feed *feed.API, account *account.Account, podMap: make(map[string]*Info), podMu: &sync.RWMutex{}, logger: logger, + tm: m, } } @@ -64,13 +86,13 @@ func (p *Pod) removePodFromPodMap(podName string) { delete(p.podMap, podName) } -func (p *Pod) GetPodInfoFromPodMap(podName string) (*Info, error) { +func (p *Pod) GetPodInfoFromPodMap(podName string) (*Info, string, error) { p.podMu.Lock() defer p.podMu.Unlock() if podInfo, ok := p.podMap[podName]; ok { - return podInfo, nil + return podInfo, podInfo.podPassword, nil } - return nil, fmt.Errorf("could not find pod: %s", podName) + return nil, "", fmt.Errorf("could not find pod: %s", podName) } func (p *Pod) GetFeed() *feed.API { diff --git a/pkg/pod/sharing.go b/pkg/pod/sharing.go index 0c5e363e..e7db42b1 100644 --- a/pkg/pod/sharing.go +++ b/pkg/pod/sharing.go @@ -25,31 +25,32 @@ import ( ) type ShareInfo struct { - PodName string `json:"pod_name"` - Address string `json:"pod_address"` - UserAddress string `json:"user_address"` + PodName string `json:"podName"` + Address string `json:"podAddress"` + Password string `json:"password"` + UserAddress string `json:"userAddress"` } // PodShare makes a pod public by exporting all the pod related information and its // address. it does this by creating a sharing reference which points to the information // required to import this pod. 
-func (p *Pod) PodShare(podName, sharedPodName, passPhrase string) (string, error) { +func (p *Pod) PodShare(podName, sharedPodName string) (string, error) { // check if pods is present and get the index of the pod - pods, _, err := p.loadUserPods() + podList, err := p.loadUserPods() if err != nil { // skipcq: TCV-001 return "", err } - if !p.checkIfPodPresent(pods, podName) { + if !p.checkIfPodPresent(podList, podName) { return "", ErrInvalidPodName } - index := p.getIndex(pods, podName) + index, podPassword := p.getIndexPassword(podList, podName) if index == -1 { // skipcq: TCV-001 return "", fmt.Errorf("pod does not exist") } // Create pod account and get the address - accountInfo, err := p.acc.CreatePodAccount(index, passPhrase, false) + accountInfo, err := p.acc.CreatePodAccount(index, false) if err != nil { // skipcq: TCV-001 return "", err } @@ -61,6 +62,7 @@ func (p *Pod) PodShare(podName, sharedPodName, passPhrase string) (string, error } shareInfo := &ShareInfo{ PodName: sharedPodName, + Password: podPassword, Address: address.String(), UserAddress: userAddress.String(), } @@ -69,7 +71,6 @@ func (p *Pod) PodShare(podName, sharedPodName, passPhrase string) (string, error if err != nil { // skipcq: TCV-001 return "", err } - ref, err := p.client.UploadBlob(data, true, true) if err != nil { // skipcq: TCV-001 return "", err @@ -107,7 +108,6 @@ func (p *Pod) ReceivePod(sharedPodName string, ref utils.Reference) (*Info, erro if resp != http.StatusOK { // skipcq: TCV-001 return nil, fmt.Errorf("ReceivePod: could not download blob") } - var shareInfo ShareInfo err = json.Unmarshal(data, &shareInfo) if err != nil { // skipcq: TCV-001 @@ -117,5 +117,5 @@ func (p *Pod) ReceivePod(sharedPodName string, ref utils.Reference) (*Info, erro if sharedPodName != "" { shareInfo.PodName = sharedPodName } - return p.CreatePod(shareInfo.PodName, "", shareInfo.Address) + return p.CreatePod(shareInfo.PodName, shareInfo.Address, shareInfo.Password) } diff --git a/pkg/pod/sharing_test.go b/pkg/pod/sharing_test.go index 912e79a4..18f31fd0 100644 --- a/pkg/pod/sharing_test.go +++ b/pkg/pod/sharing_test.go @@ -17,9 +17,13 @@ limitations under the License. 
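`PodShare` no longer takes a passphrase: the pod password is written into the uploaded `ShareInfo`, and `ReceivePod` recreates the pod on the receiving side from the shared address and password. A rough sketch of the round trip; `ownerPod` and `receiverPod` are hypothetical names for two `pod.Pod` instances belonging to different users, set up as in the tests below.

```
func shareReceiveExample(ownerPod, receiverPod *pod.Pod) error {
	// owner side: export the pod and get back a hex sharing reference
	sharingRef, err := ownerPod.PodShare("test1", "")
	if err != nil {
		return err
	}

	// receiver side: parse the reference and recreate the pod locally
	ref, err := utils.ParseHexReference(sharingRef)
	if err != nil {
		return err
	}
	_, err = receiverPod.ReceivePod("", ref)
	return err
}
```

Passing an empty name to `ReceivePod` keeps the pod's original name; the shared name is only overridden when a non-empty name is supplied.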
package pod_test import ( + "context" "errors" "io" "testing" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -33,81 +37,86 @@ func TestShare(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) podName1 := "test1" acc2 := account.New(logger) - _, _, err = acc2.CreateUserAccount("password2", "") + _, _, err = acc2.CreateUserAccount("") if err != nil { t.Fatal(err) } fd2 := feed.New(acc2.GetUserAccountInfo(), mockClient, logger) - pod2 := pod.NewPod(mockClient, fd2, acc2, logger) + pod2 := pod.NewPod(mockClient, fd2, acc2, tm, logger) podName2 := "test2" acc3 := account.New(logger) - _, _, err = acc3.CreateUserAccount("password3", "") + _, _, err = acc3.CreateUserAccount("") if err != nil { t.Fatal(err) } fd3 := feed.New(acc3.GetUserAccountInfo(), mockClient, logger) - pod3 := pod.NewPod(mockClient, fd3, acc3, logger) + pod3 := pod.NewPod(mockClient, fd3, acc3, tm, logger) podName3 := "test3" acc4 := account.New(logger) - _, _, err = acc4.CreateUserAccount("password4", "") + _, _, err = acc4.CreateUserAccount("") if err != nil { t.Fatal(err) } fd4 := feed.New(acc4.GetUserAccountInfo(), mockClient, logger) - pod4 := pod.NewPod(mockClient, fd4, acc4, logger) + pod4 := pod.NewPod(mockClient, fd4, acc4, tm, logger) podName4 := "test4" acc5 := account.New(logger) - _, _, err = acc5.CreateUserAccount("password5", "") + _, _, err = acc5.CreateUserAccount("") if err != nil { t.Fatal(err) } fd5 := feed.New(acc5.GetUserAccountInfo(), mockClient, logger) - pod5 := pod.NewPod(mockClient, fd5, acc5, logger) + pod5 := pod.NewPod(mockClient, fd5, acc5, tm, logger) podName5 := "test5" acc6 := account.New(logger) - _, _, err = acc6.CreateUserAccount("password6", "") + _, _, err = acc6.CreateUserAccount("") if err != nil { t.Fatal(err) } fd6 := feed.New(acc6.GetUserAccountInfo(), mockClient, logger) - pod6 := pod.NewPod(mockClient, fd6, acc6, logger) + pod6 := pod.NewPod(mockClient, fd6, acc6, tm, logger) podName6 := "test6" t.Run("share-pod", func(t *testing.T) { - _, err := pod1.PodShare(podName1, "", "password") + _, err := pod1.PodShare(podName1, "") if err == nil { t.Fatal("pod share should fail, not exists") } // create a pod - info, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("pod1", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("pod1", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod1, podName1) + addFilesAndDirectories(t, info, pod1, podName1, podPassword) // share pod - sharingRef, err := pod1.PodShare(podName1, "", "password") + sharingRef, err := pod1.PodShare(podName1, "") if err != nil { t.Fatal(err) } @@ -121,23 
+130,24 @@ func TestShare(t *testing.T) { t.Run("share-pod-with-new-name", func(t *testing.T) { // create a pod podName01 := "test_1" - info, err := pod1.CreatePod(podName01, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName01, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName01) } // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod1, podName01) + addFilesAndDirectories(t, info, pod1, podName01, podPassword) // share pod sharedPodName := "test01" - sharingRef, err := pod1.PodShare(podName01, sharedPodName, "password") + sharingRef, err := pod1.PodShare(podName01, sharedPodName) if err != nil { t.Fatal(err) } @@ -147,7 +157,7 @@ func TestShare(t *testing.T) { t.Fatalf("could not share pod") } - //receive pod info for name validation + // receive pod info for name validation ref, err := utils.ParseHexReference(sharingRef) if err != nil { t.Fatal(err) @@ -168,22 +178,23 @@ func TestShare(t *testing.T) { t.Run("receive-pod-info", func(t *testing.T) { // create a pod - info, err := pod2.CreatePod(podName2, "password2", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod2.CreatePod(podName2, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName2) } // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("pod1", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("pod1", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod2, podName2) + addFilesAndDirectories(t, info, pod2, podName2, podPassword) // share pod - sharingRef, err := pod2.PodShare(podName2, "", "password2") + sharingRef, err := pod2.PodShare(podName2, "") if err != nil { t.Fatal(err) } @@ -209,7 +220,8 @@ func TestShare(t *testing.T) { t.Run("receive-pod", func(t *testing.T) { // create sending pod and receiving pod - info, err := pod3.CreatePod(podName3, "password3", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod3.CreatePod(podName3, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName3) } @@ -217,8 +229,7 @@ func TestShare(t *testing.T) { if err == nil { t.Fatalf("GetAccountInfo for pod4 should fail") } - - pi4, err := pod4.CreatePod(podName4, "password4", "") + pi4, err := pod4.CreatePod(podName4, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName4) } @@ -239,16 +250,16 @@ func TestShare(t *testing.T) { t.Fatalf("pod4 address does not match") } // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod3, podName3) + addFilesAndDirectories(t, info, pod3, podName3, podPassword) // share pod - sharingRef, err := pod3.PodShare(podName3, "", "password3") + sharingRef, err := pod3.PodShare(podName3, "") if err != nil { t.Fatal(err) } @@ -267,7 +278,7 @@ func TestShare(t *testing.T) { t.Fatal("pod3 should be present") } - 
podInfo2, err := pod4.OpenPod(podName3, "password4") + podInfo2, err := pod4.OpenPod(podName3) if err != nil { t.Fatal(err) } @@ -298,20 +309,23 @@ func TestShare(t *testing.T) { if len(sharedPods) != 1 && sharedPods[0] != podName4 { t.Fatalf("invalid pod name") } - - _, err = pod4.CreatePod(podName4, "", ref.String()) + podPassword, _ = utils.GetRandString(pod.PodPasswordLength) + _, err = pod4.CreatePod(podName4, ref.String(), podPassword) if !errors.Is(err, pod.ErrPodAlreadyExists) { t.Fatal("pod should exist") } - _, err = pod4.CreatePod(podName3, "", ref.String()) + podPassword, _ = utils.GetRandString(pod.PodPasswordLength) + _, err = pod4.CreatePod(podName3, ref.String(), podPassword) if !errors.Is(err, pod.ErrPodAlreadyExists) { t.Fatal("shared pod should exist") } - _, err = pod4.CreatePod(podName4, "password4", "") + podPassword, _ = utils.GetRandString(pod.PodPasswordLength) + _, err = pod4.CreatePod(podName4, "", podPassword) if !errors.Is(err, pod.ErrPodAlreadyExists) { t.Fatal("pod should exist") } - _, err = pod4.CreatePod(podName3, "password4", "") + podPassword, _ = utils.GetRandString(pod.PodPasswordLength) + _, err = pod4.CreatePod(podName3, "", podPassword) if !errors.Is(err, pod.ErrPodAlreadyExists) { t.Fatal("shared pod should exist") } @@ -329,26 +343,28 @@ func TestShare(t *testing.T) { t.Run("receive-pod-with-new-name", func(t *testing.T) { // create sending pod and receiving pod - info, err := pod5.CreatePod(podName5, "password5", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod5.CreatePod(podName5, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName3) } - _, err = pod6.CreatePod(podName6, "password6", "") + podPassword, _ = utils.GetRandString(pod.PodPasswordLength) + _, err = pod6.CreatePod(podName6, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName4) } // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod5, podName5) + addFilesAndDirectories(t, info, pod5, podName5, podPassword) // share pod - sharingRef, err := pod5.PodShare(podName5, "", "password5") + sharingRef, err := pod5.PodShare(podName5, "") if err != nil { t.Fatal(err) } diff --git a/pkg/pod/stat.go b/pkg/pod/stat.go index 9580f155..127babbb 100644 --- a/pkg/pod/stat.go +++ b/pkg/pod/stat.go @@ -24,7 +24,7 @@ type Stat struct { // PodStat shows all the pod related information like podname and its current address. func (p *Pod) PodStat(podName string) (*Stat, error) { - podInfo, err := p.GetPodInfoFromPodMap(podName) + podInfo, _, err := p.GetPodInfoFromPodMap(podName) if err != nil { return nil, ErrInvalidPodName } diff --git a/pkg/pod/stat_test.go b/pkg/pod/stat_test.go index 7212560b..f001e791 100644 --- a/pkg/pod/stat_test.go +++ b/pkg/pod/stat_test.go @@ -17,9 +17,15 @@ limitations under the License. 
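`GetPodInfoFromPodMap` now returns the pod password as a second value (see the `pkg/pod/pod.go` hunk earlier in this diff); callers that do not need it, such as `PodStat` above, simply discard it. A minimal sketch, assuming a `*pod.Pod` with a pod named `test1` that is already open:

```
func lookupExample(p *pod.Pod) {
	podInfo, podPassword, err := p.GetPodInfoFromPodMap("test1")
	if err != nil {
		return // pod is not open
	}
	_ = podInfo
	_ = podPassword
}
```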
package pod_test import ( + "context" "io" "strings" "testing" + "time" + + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -32,12 +38,16 @@ func TestStat(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) podName1 := "test1" t.Run("pod-stat", func(t *testing.T) { @@ -45,7 +55,8 @@ func TestStat(t *testing.T) { if err == nil { t.Fatal("stat should be nil") } - info, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } diff --git a/pkg/pod/sync.go b/pkg/pod/sync.go index 40bfada7..c31056d2 100644 --- a/pkg/pod/sync.go +++ b/pkg/pod/sync.go @@ -16,10 +16,15 @@ limitations under the License. package pod +import ( + "context" + "sync" +) + // SyncPod syncs the pod to the latest version by extracting the current meta information // of files and directories of the pod. func (p *Pod) SyncPod(podName string) error { - podName, err := cleanPodName(podName) + podName, err := CleanPodName(podName) if err != nil { // skipcq: TCV-001 return err } @@ -28,16 +33,42 @@ func (p *Pod) SyncPod(podName string) error { return ErrPodNotOpened } - podInfo, err := p.GetPodInfoFromPodMap(podName) + podInfo, _, err := p.GetPodInfoFromPodMap(podName) if err != nil { // skipcq: TCV-001 return err } // sync from the root directory - err = podInfo.GetDirectory().SyncDirectory("/") + err = podInfo.GetDirectory().SyncDirectory("/", podInfo.GetPodPassword()) if err != nil { return err } + return nil +} + +// SyncPodAsync syncs the pod to the latest version by extracting the current meta information +// of files and directories of the pod, concurrently. +func (p *Pod) SyncPodAsync(ctx context.Context, podName string) error { + podName, err := CleanPodName(podName) + if err != nil { // skipcq: TCV-001 + return err + } + if !p.IsPodOpened(podName) { + return ErrPodNotOpened + } + + podInfo, _, err := p.GetPodInfoFromPodMap(podName) + if err != nil { // skipcq: TCV-001 + return err + } + + // sync from the root directory + wg := new(sync.WaitGroup) + err = podInfo.GetDirectory().SyncDirectoryAsync(ctx, "/", podInfo.GetPodPassword(), wg) + if err != nil { + return err + } + wg.Wait() return nil } diff --git a/pkg/pod/sync_test.go b/pkg/pod/sync_test.go index 936c98c9..d6c08c06 100644 --- a/pkg/pod/sync_test.go +++ b/pkg/pod/sync_test.go @@ -17,27 +17,34 @@ limitations under the License. 
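`SyncPodAsync` is the concurrent counterpart of `SyncPod`: it requires the pod to be opened first, then syncs from the root directory with `SyncDirectoryAsync` and waits on a `WaitGroup`. `OpenPodAsync`, added earlier in this diff, calls it automatically after opening a pod. A minimal sketch, assuming a `*pod.Pod` with `test1` already opened:

```
func syncExample(p *pod.Pod) error {
	return p.SyncPodAsync(context.Background(), "test1")
}
```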
package pod_test import ( + "context" "io" "testing" + "time" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/feed" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" "github.com/fairdatasociety/fairOS-dfs/pkg/pod" + "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/plexsysio/taskmanager" ) func TestSync(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) acc := account.New(logger) - _, _, err := acc.CreateUserAccount("password", "") + _, _, err := acc.CreateUserAccount("") if err != nil { t.Fatal(err) } - + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() fd := feed.New(acc.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd, acc, logger) + pod1 := pod.NewPod(mockClient, fd, acc, tm, logger) podName1 := "test1" t.Run("sync-pod", func(t *testing.T) { @@ -47,22 +54,22 @@ func TestSync(t *testing.T) { t.Fatal("sync should fail, pod not opened") } // create a pod - info, err := pod1.CreatePod(podName1, "password", "") + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) + info, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } - // make root dir so that other directories can be added - err = info.GetDirectory().MkRootDir("pod1", info.GetPodAddress(), info.GetFeed()) + err = info.GetDirectory().MkRootDir("pod1", podPassword, info.GetPodAddress(), info.GetFeed()) if err != nil { t.Fatal(err) } // create some dir and files - addFilesAndDirectories(t, info, pod1, podName1) + addFilesAndDirectories(t, info, pod1, podName1, podPassword) - // open the pod ( ths triggers sync too - gotInfo, err := pod1.OpenPod(podName1, "password") + // open the pod ths triggers sync too + gotInfo, err := pod1.OpenPod(podName1) if err != nil { t.Fatal(err) } diff --git a/pkg/pod/utils.go b/pkg/pod/utils.go index 02ab5664..0e64d489 100644 --- a/pkg/pod/utils.go +++ b/pkg/pod/utils.go @@ -36,19 +36,19 @@ func (p *Pod) IsPodOpened(podName string) bool { // IsPodPresent checks if a pod is already present for user func (p *Pod) IsPodPresent(podName string) bool { - podName, err := cleanPodName(podName) + podName, err := CleanPodName(podName) if err != nil { return false } // check if pods is present and get free index - pods, sharedPods, err := p.loadUserPods() + podList, err := p.loadUserPods() if err != nil { // skipcq: TCV-001 return false } - if p.checkIfPodPresent(pods, podName) { + if p.checkIfPodPresent(podList, podName) { return true } - if p.checkIfSharedPodPresent(sharedPods, podName) { + if p.checkIfSharedPodPresent(podList, podName) { return true } return false @@ -72,15 +72,15 @@ func (*Pod) GetName(inode *d.Inode) string { // GetAccountInfo returns the pod account info func (p *Pod) GetAccountInfo(podName string) (*account.Info, error) { - podInfo, err := p.GetPodInfoFromPodMap(podName) + podInfo, _, err := p.GetPodInfoFromPodMap(podName) if err != nil { return nil, err } return podInfo.GetAccountInfo(), nil } -// cleanPodName trims spaces from a pod name -func cleanPodName(podName string) (string, error) { +// CleanPodName trims spaces from a pod name +func CleanPodName(podName string) (string, error) { if podName == "" { return "", ErrInvalidPodName } diff --git a/pkg/taskmanager/taskmanager.go b/pkg/taskmanager/taskmanager.go new file mode 100644 index 00000000..17b2d931 --- /dev/null 
+++ b/pkg/taskmanager/taskmanager.go @@ -0,0 +1,7 @@ +package taskmanager + +import "github.com/plexsysio/taskmanager" + +type TaskManagerGO interface { + Go(newTask taskmanager.Task) (<-chan struct{}, error) +} diff --git a/pkg/taskmanager/tasks.go b/pkg/taskmanager/tasks.go new file mode 100644 index 00000000..1bbb8907 --- /dev/null +++ b/pkg/taskmanager/tasks.go @@ -0,0 +1 @@ +package taskmanager diff --git a/pkg/user/delete.go b/pkg/user/delete.go index aac02bb7..d14c5c60 100644 --- a/pkg/user/delete.go +++ b/pkg/user/delete.go @@ -16,51 +16,6 @@ limitations under the License. package user -// DeleteUser deletes a user from the Swarm network. Logs him out if he is logged in and remove from all the -// data structures. -// skipcq: TCV-001 -func (u *Users) DeleteUser(userName, dataDir, password, sessionId string, ui *Info) error { - // check if session id and user address present in map - if !u.IsUserLoggedIn(sessionId) { // skipcq: TCV-001 - return ErrUserNotLoggedIn - } - - // username validation - if !u.IsUsernameAvailable(userName, dataDir) { // skipcq: TCV-001 - return ErrInvalidUserName - } - - // check for valid password - userInfo := u.getUserFromMap(sessionId) - acc := userInfo.account - if !acc.Authorise(password) { - return ErrInvalidPassword - } - - // skipcq: TCV-001 - // Logout user - err := u.Logout(sessionId) - if err != nil { - return err - } - - // skipcq: TCV-001 - // remove the user mnemonic file and the user-address mapping file - address, err := u.getAddressFromUserName(userName, dataDir) - if err != nil { // skipcq: TCV-001 - return err - } - err = u.deleteMnemonic(userName, address, ui.GetFeed(), u.client) // skipcq: TCV-001 - if err != nil { // skipcq: TCV-001 - return err - } - err = u.deleteUserMapping(userName, dataDir) - if err != nil { // skipcq: TCV-001 - return err - } - return nil // skipcq: TCV-001 -} - // DeleteUserV2 deletes a user from the Swarm network. Logs him out if he is logged in and remove from all the // data structures. func (u *Users) DeleteUserV2(userName, password, sessionId string, ui *Info) error { @@ -77,15 +32,12 @@ func (u *Users) DeleteUserV2(userName, password, sessionId string, ui *Info) err // check for valid password userInfo := u.getUserFromMap(sessionId) acc := userInfo.account - if !acc.Authorise(password) { - return ErrInvalidPassword - } - // Logout user - err := u.Logout(sessionId) + err := u.deletePortableAccount(acc.GetUserAccountInfo().GetAddress(), userName, password, ui.GetFeed()) if err != nil { // skipcq: TCV-001 return err } - return u.deletePortableAccount(acc.GetUserAccountInfo().GetAddress(), userName, password, ui.GetFeed()) + // Logout user + return u.Logout(sessionId) } diff --git a/pkg/user/delete_test.go b/pkg/user/delete_test.go index 601b59ca..3dcb9e56 100644 --- a/pkg/user/delete_test.go +++ b/pkg/user/delete_test.go @@ -17,9 +17,13 @@ limitations under the License. 
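The new `pkg/taskmanager` package only declares the narrow `TaskManagerGO` interface that the pod, dir, file and user packages now depend on; the concrete implementation wired in by this patch is `github.com/plexsysio/taskmanager`, as the updated tests show. A sketch of constructing one; the `dfstm` alias for `pkg/taskmanager` is purely illustrative:

```
func newTaskManagerExample(logger logging.Logger) dfstm.TaskManagerGO {
	// the plexsysio manager provides the Go(task) method the interface asks for;
	// callers stop it with tm.Stop(ctx) on shutdown, as the tests in this diff do
	return taskmanager.New(1, 10, time.Second*15, logger)
}
```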
package user_test import ( + "context" "errors" "io" "testing" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" @@ -32,26 +36,27 @@ func TestDelete(t *testing.T) { logger := logging.New(io.Discard, 0) t.Run("delete-user", func(t *testing.T) { + tm := taskmanager.New(1, 10, time.Second*15, logger) + defer func() { + _ = tm.Stop(context.Background()) + }() ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := user.NewUsers("", mockClient, ens, logger) - _, _, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "") + _, _, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "", tm) if err != nil { t.Fatal(err) } - // delete user with wrong password err = userObject.DeleteUserV2("user1", "password11", ui.GetSessionId(), ui) - if !errors.Is(err, user.ErrInvalidPassword) { - t.Fatal(err) + if err == nil { + t.Fatal("delete should fail") } - // delete user invalid sessionid err = userObject.DeleteUserV2("user1", "password1", "invalid_session", ui) if !errors.Is(err, user.ErrUserNotLoggedIn) { t.Fatal(err) } - // delete user err = userObject.DeleteUserV2("user1", "password1", ui.GetSessionId(), ui) if err != nil { diff --git a/pkg/user/errors.go b/pkg/user/errors.go index fc4937bb..6b1b12ed 100644 --- a/pkg/user/errors.go +++ b/pkg/user/errors.go @@ -19,9 +19,27 @@ package user import "errors" var ( + // ErrUserAlreadyLoggedIn is returned if username is already logged-in ErrUserAlreadyLoggedIn = errors.New("user already logged in") - ErrInvalidUserName = errors.New("invalid user name") - ErrUserAlreadyPresent = errors.New("user name already present") - ErrUserNotLoggedIn = errors.New("user not logged in") - ErrInvalidPassword = errors.New("invalid password") + + // ErrInvalidUserName is returned if the username is invalid + ErrInvalidUserName = errors.New("invalid user name") + + // ErrUserNameNotFound is returned if the username is invalid + ErrUserNameNotFound = errors.New("no user available") + + // ErrUserAlreadyPresent is returned if user name is already taken while signup + ErrUserAlreadyPresent = errors.New("user name already present") + + // ErrUserNotLoggedIn is returned if user is not logged in + ErrUserNotLoggedIn = errors.New("user not logged in") + + // ErrInvalidPassword is returned if password is invalid + ErrInvalidPassword = errors.New("invalid password") + + // ErrBlankPassword is returned if dfs.API CreateAccountV2 is called with a blank password + ErrBlankPassword = errors.New("password is blank") + + // ErrBlankUsername is returned if dfs.API CreateAccountV2 is called with a blank username + ErrBlankUsername = errors.New("username is blank") ) diff --git a/pkg/user/export.go b/pkg/user/export.go index 50e1000f..3365a638 100644 --- a/pkg/user/export.go +++ b/pkg/user/export.go @@ -13,6 +13,7 @@ limitations under the License. package user +/* // ExportUser gives back the information required to export the user from one dfs server // import him in to another. 
func (u *Users) ExportUser(ui *Info) (string, string, error) { @@ -22,3 +23,4 @@ func (u *Users) ExportUser(ui *Info) (string, string, error) { } return ui.name, address.Hex(), nil } +*/ diff --git a/pkg/user/export_test.go b/pkg/user/export_test.go index 01c11d33..7aaa4bd8 100644 --- a/pkg/user/export_test.go +++ b/pkg/user/export_test.go @@ -1,18 +1,6 @@ package user -import ( - "io" - "io/ioutil" - "os" - "testing" - - "github.com/fairdatasociety/fairOS-dfs/pkg/account" - - "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" - mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" - "github.com/fairdatasociety/fairOS-dfs/pkg/logging" -) - +/* func TestExport(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) @@ -24,7 +12,7 @@ func TestExport(t *testing.T) { } defer os.RemoveAll(dataDir) ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := NewUsers(dataDir, mockClient, ens, logger) _, _, ui, err := userObject.CreateNewUser("7e4567e7cb003804992eef11fd5c757275a4a", "password1", "", "") if err != nil { @@ -45,5 +33,5 @@ func TestExport(t *testing.T) { t.Fatal("address mismatch") } }) - } +*/ diff --git a/pkg/user/login.go b/pkg/user/login.go index 300cd2d3..2d9f6219 100644 --- a/pkg/user/login.go +++ b/pkg/user/login.go @@ -19,6 +19,8 @@ package user import ( "sync" + "github.com/fairdatasociety/fairOS-dfs/pkg/taskmanager" + "github.com/ethereum/go-ethereum/crypto" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" @@ -32,10 +34,10 @@ import ( // LoginUserV2 checks if the user is present and logs in the user. It also creates the required information // to execute user function and stores it in memory. -func (u *Users) LoginUserV2(userName, passPhrase string, client blockstore.Client, sessionId string) (*Info, string, string, error) { +func (u *Users) LoginUserV2(userName, passPhrase string, client blockstore.Client, tm taskmanager.TaskManagerGO, sessionId string) (*Info, string, string, error) { // check if username is available (user created) if !u.IsUsernameAvailableV2(userName) { - return nil, "", "", ErrInvalidUserName + return nil, "", "", ErrUserNameNotFound } // get owner address from Subdomain registrar @@ -46,20 +48,21 @@ func (u *Users) LoginUserV2(userName, passPhrase string, client blockstore.Clien // create account acc := account.New(u.logger) accountInfo := acc.GetUserAccountInfo() - // load public key from public resolver - publicKey, nameHash, err := u.ens.GetInfo(userName) - if err != nil { // skipcq: TCV-001 - return nil, "", "", err - } - pb := crypto.FromECDSAPub(publicKey) - // load encrypted private key fd := feed.New(accountInfo, client, u.logger) key, err := u.downloadPortableAccount(utils.Address(address), userName, passPhrase, fd) if err != nil { + u.logger.Errorf(err.Error()) return nil, "", "", ErrInvalidPassword } + // load public key from public resolver + publicKey, nameHash, err := u.ens.GetInfo(userName) + if err != nil { // skipcq: TCV-001 + return nil, "", "", err + } + pb := crypto.FromECDSAPub(publicKey) + // decrypt and remove pad from private ley seed, err := accountInfo.RemovePadFromSeed(key, passPhrase) if err != nil { // skipcq: TCV-001 @@ -76,9 +79,9 @@ func (u *Users) LoginUserV2(userName, passPhrase string, client blockstore.Clien } // Instantiate pod, dir & file objects - file := f.NewFile(userName, client, fd, accountInfo.GetAddress(), u.logger) - dir := d.NewDirectory(userName, client, fd, 
accountInfo.GetAddress(), file, u.logger) - pod := p.NewPod(u.client, fd, acc, u.logger) + file := f.NewFile(userName, client, fd, accountInfo.GetAddress(), tm, u.logger) + pod := p.NewPod(u.client, fd, acc, tm, u.logger) + dir := d.NewDirectory(userName, client, fd, accountInfo.GetAddress(), file, tm, u.logger) if sessionId == "" { sessionId = cookie.GetUniqueSessionId() } @@ -98,66 +101,6 @@ func (u *Users) LoginUserV2(userName, passPhrase string, client blockstore.Clien return ui, nameHash, utils.Encode(pb), u.addUserAndSessionToMap(ui) } -// LoginUser checks if the user is present and logs in the user. It also creates the required information -// to execute user function and stores it in memory. -func (u *Users) LoginUser(userName, passPhrase, dataDir string, client blockstore.Client, sessionId string) (*Info, error) { - // check if username is available (user created) - if !u.IsUsernameAvailable(userName, dataDir) { // skipcq: TCV-001 - return nil, ErrInvalidUserName - } - - // create account - acc := account.New(u.logger) - accountInfo := acc.GetUserAccountInfo() - - // load address from userName - address, err := u.getAddressFromUserName(userName, dataDir) - if err != nil { // skipcq: TCV-001 - return nil, err - } - - // load encrypted mnemonic from Swarm - fd := feed.New(accountInfo, client, u.logger) - encryptedMnemonic, err := u.getEncryptedMnemonic(userName, address, fd) - if err != nil { // skipcq: TCV-001 - return nil, err - } - - err = acc.LoadUserAccount(passPhrase, encryptedMnemonic) - if err != nil { // skipcq: TCV-001 - if err.Error() == "mnemonic is invalid" { // skipcq: TCV-001 - return nil, ErrInvalidPassword - } - return nil, err - } - - if u.IsUserLoggedIn(sessionId) { // skipcq: TCV-001 - return nil, ErrUserAlreadyLoggedIn - } - // Instantiate pod, dir & file objects - file := f.NewFile(userName, client, fd, accountInfo.GetAddress(), u.logger) - dir := d.NewDirectory(userName, client, fd, accountInfo.GetAddress(), file, u.logger) - pod := p.NewPod(u.client, fd, acc, u.logger) - if sessionId == "" { - sessionId = cookie.GetUniqueSessionId() - } - - ui := &Info{ - name: userName, - sessionId: sessionId, - feedApi: fd, - account: acc, - file: file, - dir: dir, - pod: pod, - openPods: make(map[string]*p.Info), - openPodsMu: &sync.RWMutex{}, - } - - // set cookie and add user to map - return ui, u.addUserAndSessionToMap(ui) -} - func (u *Users) addUserAndSessionToMap(ui *Info) error { u.addUserToMap(ui) return nil diff --git a/pkg/user/login_test.go b/pkg/user/login_test.go index 497b68b7..ef8903fa 100644 --- a/pkg/user/login_test.go +++ b/pkg/user/login_test.go @@ -20,11 +20,13 @@ import ( "errors" "io" "testing" + "time" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" "github.com/fairdatasociety/fairOS-dfs/pkg/logging" "github.com/fairdatasociety/fairOS-dfs/pkg/user" + "github.com/plexsysio/taskmanager" ) func TestLogin(t *testing.T) { @@ -32,10 +34,12 @@ func TestLogin(t *testing.T) { logger := logging.New(io.Discard, 0) t.Run("login-user", func(t *testing.T) { + tm := taskmanager.New(1, 10, time.Second*15, logger) + ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := user.NewUsers("", mockClient, ens, logger) - _, _, _, _, ui, err := userObject.CreateNewUserV2("7e4567e7cb003804992eef11fd5c757275a4c", "password1", "", "") + _, _, _, _, ui, err := userObject.CreateNewUserV2("7e4567e7cb003804992eef11fd5c757275a4c", "password1", "", "", tm) if err != nil { 
t.Fatal(err) } @@ -46,18 +50,18 @@ func TestLogin(t *testing.T) { t.Fatal(err) } - _, _, _, err = userObject.LoginUserV2("not_an_username", "password1", mockClient, "") - if !errors.Is(err, user.ErrInvalidUserName) { + _, _, _, err = userObject.LoginUserV2("not_an_username", "password1", mockClient, tm, "") + if !errors.Is(err, user.ErrUserNameNotFound) { t.Fatal(err) } - _, _, _, err = userObject.LoginUserV2("7e4567e7cb003804992eef11fd5c757275a4c", "wrong_password", mockClient, "") + _, _, _, err = userObject.LoginUserV2("7e4567e7cb003804992eef11fd5c757275a4c", "wrong_password", mockClient, tm, "") if !errors.Is(err, user.ErrInvalidPassword) { t.Fatal(err) } // addUserAndSessionToMap user again - ui1, _, _, err := userObject.LoginUserV2("7e4567e7cb003804992eef11fd5c757275a4c", "password1", mockClient, "") + ui1, _, _, err := userObject.LoginUserV2("7e4567e7cb003804992eef11fd5c757275a4c", "password1", mockClient, tm, "") if err != nil { t.Fatal(err) } diff --git a/pkg/user/logout.go b/pkg/user/logout.go index 88d868ae..3b975629 100644 --- a/pkg/user/logout.go +++ b/pkg/user/logout.go @@ -18,11 +18,6 @@ package user // LogoutUser logs out a giver user from the system and clean him from all the data structures. func (u *Users) LogoutUser(userName, sessionId string) error { - // basic validations - //if !u.IsUsernameAvailableV2(userName) { - // return ErrInvalidUserName - //} - // unset cookie and remove user from map if !u.IsUserLoggedIn(sessionId) { return ErrUserNotLoggedIn diff --git a/pkg/user/logout_test.go b/pkg/user/logout_test.go index 6337ba0d..2a4b31bc 100644 --- a/pkg/user/logout_test.go +++ b/pkg/user/logout_test.go @@ -20,6 +20,9 @@ import ( "errors" "io" "testing" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" @@ -32,10 +35,12 @@ func TestLogout(t *testing.T) { logger := logging.New(io.Discard, 0) t.Run("logout-user", func(t *testing.T) { + tm := taskmanager.New(1, 10, time.Second*15, logger) + ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := user.NewUsers("", mockClient, ens, logger) - _, _, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "") + _, _, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "", tm) if err != nil { t.Fatal(err) } diff --git a/pkg/user/migrate.go b/pkg/user/migrate.go deleted file mode 100644 index 85226bd1..00000000 --- a/pkg/user/migrate.go +++ /dev/null @@ -1,82 +0,0 @@ -package user - -import ( - "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" - "github.com/fairdatasociety/fairOS-dfs/pkg/feed" -) - -// MigrateUser migrates a user credential from local storage to the Swarm network. -// Deletes local information. It also deletes previous mnemonic and stores it in secondary location -// Logs him out if he is logged in. 
-func (u *Users) MigrateUser(oldUsername, newUsername, dataDir, password, sessionId string, client blockstore.Client, ui *Info) error { - // check if session id and user address present in map - if !u.IsUserLoggedIn(sessionId) { - return ErrUserNotLoggedIn - } - if newUsername == "" { - newUsername = oldUsername - } - // username availability - if !u.IsUsernameAvailable(oldUsername, dataDir) { - return ErrInvalidUserName - } - - // username availability for v2 - if u.IsUsernameAvailableV2(newUsername) { - return ErrUserAlreadyPresent - } - - // check for valid password - userInfo := u.getUserFromMap(sessionId) - acc := userInfo.account - if !acc.Authorise(password) { // skipcq: TCV-001 - return ErrInvalidPassword - } - accountInfo := acc.GetUserAccountInfo() - - // create ens subdomain and store mnemonic - _, err := u.createENS(newUsername, accountInfo) - if err != nil { // skipcq: TCV-001 - return err - } - // load address from userName - address, err := u.getAddressFromUserName(oldUsername, dataDir) - if err != nil { - return err - } - - fd := feed.New(accountInfo, client, u.logger) - encryptedMnemonic, err := u.getEncryptedMnemonic(oldUsername, address, fd) - if err != nil { // skipcq: TCV-001 - return err - } - err = acc.LoadUserAccount(password, encryptedMnemonic) - if err != nil { // skipcq: TCV-001 - return err - } - - seed, err := acc.GetWallet().LoadSeedFromMnemonic(password) - if err != nil { - return err - } - key, err := accountInfo.PadSeed(seed, password) - if err != nil { // skipcq: TCV-001 - return err - } - if err := u.uploadPortableAccount(accountInfo, newUsername, password, key, fd); err != nil { // skipcq: TCV-001 - return err - } - - // Logout user - err = u.Logout(sessionId) - if err != nil { // skipcq: TCV-001 - return err - } - - err = u.deleteMnemonic(oldUsername, accountInfo.GetAddress(), ui.GetFeed(), u.client) - if err != nil { // skipcq: TCV-001 - return err - } - - return u.deleteUserMapping(oldUsername, dataDir) -} diff --git a/pkg/user/migrate_test.go b/pkg/user/migrate_test.go deleted file mode 100644 index 62ca575d..00000000 --- a/pkg/user/migrate_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package user - -import ( - "errors" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" - mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" - "github.com/fairdatasociety/fairOS-dfs/pkg/logging" -) - -func TestNew(t *testing.T) { - mockClient := mock.NewMockBeeClient() - logger := logging.New(io.Discard, 0) - - t.Run("new-user-migrate-invalid-user", func(t *testing.T) { - dataDir, err := ioutil.TempDir("", "new") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dataDir) - - ens := mock2.NewMockNamespaceManager() - - //create user - userObject := NewUsers(dataDir, mockClient, ens, logger) - username := "user12" - password := "password1" - _, _, ui, err := userObject.CreateNewUser(username, password, "", "") - if err != nil { - t.Fatal(err) - } - err = userObject.MigrateUser("username_not_present", "", dataDir, password, ui.sessionId, mockClient, ui) - if !errors.Is(err, ErrInvalidUserName) { - t.Fatal(err) - } - }) - - t.Run("new-user-migrate-invalid-session", func(t *testing.T) { - dataDir, err := ioutil.TempDir("", "new") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dataDir) - - ens := mock2.NewMockNamespaceManager() - - //create user - userObject := NewUsers(dataDir, mockClient, ens, logger) - username := "user12" - password := "password1" - _, _, _, err = 
userObject.CreateNewUser(username, password, "", "") - if err != nil { - t.Fatal(err) - } - - err = userObject.MigrateUser(username, "", dataDir, password, "asd8989", mockClient, nil) - if err == nil { - t.Fatalf("invalid sessionId") - } - }) - - t.Run("new-user-migrate", func(t *testing.T) { - dataDir, err := ioutil.TempDir("", "new") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dataDir) - - ens := mock2.NewMockNamespaceManager() - - //create user - userObject := NewUsers(dataDir, mockClient, ens, logger) - username := "user1" - password := "password1" - _, mnemonic, ui, err := userObject.CreateNewUser(username, password, "", "") - if err != nil { - t.Fatal(err) - } - pod1 := ui.GetPod() - podName1 := "test1" - - pi1, err := pod1.CreatePod(podName1, password, "") - if err != nil { - t.Fatalf("error creating pod %s : %s", podName1, err.Error()) - } - - if ui.GetUserName() != "user1" { - t.Fatalf("invalid user name") - } - if ui.GetFeed() == nil || ui.GetAccount() == nil { - t.Fatalf("invalid feed or account") - } - err = ui.GetAccount().GetWallet().IsValidMnemonic(mnemonic) - if err != nil { - t.Fatalf("invalid mnemonic") - } - - err = userObject.MigrateUser(username, "", dataDir, password, ui.sessionId, mockClient, ui) - if err != nil { - t.Fatalf("migrate user: %s", err.Error()) - } - - ui2, _, _, err := userObject.LoginUserV2(username, password, mockClient, "") - if err != nil { - t.Fatalf("v2 login: %s", err.Error()) - } - pod2 := ui2.GetPod() - pi2, err := pod2.OpenPod(podName1, password) - if err != nil { - t.Fatalf("open pod after migration: %s", err.Error()) - } - if pi1.GetPodAddress() != pi2.GetPodAddress() { - t.Fatalf("pod accounts do not match") - } - }) - - t.Run("new-user-migrate-already-migrated", func(t *testing.T) { - dataDir, err := ioutil.TempDir("", "new") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dataDir) - - ens := mock2.NewMockNamespaceManager() - - //create user - userObject := NewUsers(dataDir, mockClient, ens, logger) - username := "user14" - password := "password1" - _, _, ui, err := userObject.CreateNewUser(username, password, "", "") - if err != nil { - t.Fatal(err) - } - - err = userObject.MigrateUser(username, "", dataDir, password, ui.sessionId, mockClient, ui) - if err != nil { - t.Fatalf("migrate user: %s", err.Error()) - } - - _, _, ui, err = userObject.CreateNewUser(username, password, "", "") - if err != nil { - t.Fatal(err) - } - - err = userObject.MigrateUser(username, "", dataDir, password, ui.sessionId, mockClient, ui) - if !errors.Is(err, ErrUserAlreadyPresent) { - t.Fatal("user already migrated") - } - }) - - t.Run("new-user-migrate-with-pods", func(t *testing.T) { - dataDir, err := ioutil.TempDir("", "new") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dataDir) - - ens := mock2.NewMockNamespaceManager() - - //create user - userObject := NewUsers(dataDir, mockClient, ens, logger) - username := "user1" - password := "password1" - _, mnemonic, ui, err := userObject.CreateNewUser(username, password, "", "") - if err != nil { - t.Fatal(err) - } - pod1 := ui.GetPod() - podName1 := "test1" - podName2 := "test2" - - pi1, err := pod1.CreatePod(podName1, password, "") - if err != nil { - t.Fatalf("error creating pod %s : %s", podName1, err.Error()) - } - pi2, err := pod1.CreatePod(podName2, password, "") - if err != nil { - t.Fatalf("error creating pod %s : %s", podName1, err.Error()) - } - - if ui.GetUserName() != "user1" { - t.Fatalf("invalid user name") - } - if ui.GetFeed() == nil || ui.GetAccount() == nil { - 
t.Fatalf("invalid feed or account") - } - err = ui.GetAccount().GetWallet().IsValidMnemonic(mnemonic) - if err != nil { - t.Fatalf("invalid mnemonic") - } - - err = userObject.Logout(ui.GetSessionId()) - if err != nil { - t.Fatalf("logout failed: %s", err) - } - - loggedIn := userObject.IsUserLoggedIn(ui.sessionId) - if loggedIn { - t.Fatalf("user logout failed") - } - - ui, err = userObject.LoginUser(username, password, dataDir, mockClient, "") - if err != nil { - t.Fatal("v1 login failed") - } - err = userObject.MigrateUser(username, "", dataDir, password, ui.sessionId, mockClient, ui) - if err != nil { - t.Fatalf("migrate user: %s", err.Error()) - } - - ui2, _, _, err := userObject.LoginUserV2(username, password, mockClient, "") - if err != nil { - t.Fatalf("v2 login: %s", err.Error()) - } - pod2 := ui2.GetPod() - pi3, err := pod2.OpenPod(podName1, password) - if err != nil { - t.Fatalf("open pod after migration: %s", err.Error()) - } - if pi1.GetPodAddress() != pi3.GetPodAddress() { - t.Fatalf("pod accounts do not match") - } - pi4, err := pod2.OpenPod(podName2, password) - if err != nil { - t.Fatalf("open pod after migration: %s", err.Error()) - } - if pi2.GetPodAddress() != pi4.GetPodAddress() { - t.Fatalf("pod accounts do not match") - } - }) - -} diff --git a/pkg/user/name_address.go b/pkg/user/name_address.go deleted file mode 100644 index ba7a6a80..00000000 --- a/pkg/user/name_address.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright © 2020 FairOS Authors -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package user - -import ( - "os" - "path/filepath" - - "github.com/fairdatasociety/fairOS-dfs/pkg/utils" -) - -const ( - userDirectoryName = "user" -) - -func (*Users) isUserMappingPresent(userName, dataDir string) bool { - destDir := filepath.Join(dataDir, userDirectoryName) - err := os.MkdirAll(destDir, 0700) - if err != nil { // skipcq: TCV-001 - return false - } - userFileName := filepath.Join(destDir, userName) - info, err := os.Stat(userFileName) - if os.IsNotExist(err) { // skipcq: TCV-001 - return false - } - return !info.IsDir() -} - -func (*Users) storeUserNameToAddressFileMapping(userName, dataDir string, address utils.Address) error { - destDir := filepath.Join(dataDir, userDirectoryName) - err := os.MkdirAll(destDir, 0700) - if err != nil { // skipcq: TCV-001 - return err - } - userFileName := filepath.Join(destDir, userName) - return os.WriteFile(userFileName, address.ToBytes(), 0700) -} - -func (*Users) deleteUserMapping(userName, dataDir string) error { - destDir := filepath.Join(dataDir, userDirectoryName) - userFileName := filepath.Join(destDir, userName) - return os.Remove(userFileName) -} - -func (*Users) getAddressFromUserName(userName, dataDir string) (utils.Address, error) { - destDir := filepath.Join(dataDir, userDirectoryName) - userFileName := filepath.Join(destDir, userName) - data, err := os.ReadFile(userFileName) - if err != nil { - return utils.ZeroAddress, err - } - return utils.NewAddress(data), nil -} diff --git a/pkg/user/new.go b/pkg/user/new.go index c0515881..6ec61b78 100644 --- a/pkg/user/new.go +++ b/pkg/user/new.go @@ -20,6 +20,8 @@ import ( "regexp" "sync" + "github.com/fairdatasociety/fairOS-dfs/pkg/taskmanager" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/fairdatasociety/fairOS-dfs/pkg/account" @@ -30,72 +32,11 @@ import ( f "github.com/fairdatasociety/fairOS-dfs/pkg/file" p "github.com/fairdatasociety/fairOS-dfs/pkg/pod" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" - hdwallet "github.com/miguelmota/go-ethereum-hdwallet" ) -// CreateNewUser creates a new user with the given user name and password. if a mnemonic is passed +// CreateNewUserV2 creates a new user with the given username and password. if a mnemonic is passed // then it is used instead of creating a new one. 
-func (u *Users) CreateNewUser(userName, passPhrase, mnemonic, sessionId string) (string, string, *Info, error) { - // username validation - if u.IsUsernameAvailable(userName, u.dataDir) { // skipcq: TCV-001 - return "", "", nil, ErrUserAlreadyPresent - } - - acc := account.New(u.logger) - accountInfo := acc.GetUserAccountInfo() - fd := feed.New(accountInfo, u.client, u.logger) - - //create a new base user account with the mnemonic - mnemonic, encryptedMnemonic, err := acc.CreateUserAccount(passPhrase, mnemonic) - if err != nil { // skipcq: TCV-001 - return "", "", nil, err - } - - // store the encrypted mnemonic in Swarm - err = u.uploadEncryptedMnemonic(userName, accountInfo.GetAddress(), encryptedMnemonic, fd) - if err != nil { // skipcq: TCV-001 - return "", "", nil, err - } - - // store the username -> address mapping locally - err = u.storeUserNameToAddressFileMapping(userName, u.dataDir, accountInfo.GetAddress()) - if err != nil { // skipcq: TCV-001 - return "", "", nil, err - } - - // Instantiate pod, dir & file objects - file := f.NewFile(userName, u.client, fd, accountInfo.GetAddress(), u.logger) - dir := d.NewDirectory(userName, u.client, fd, accountInfo.GetAddress(), file, u.logger) - pod := p.NewPod(u.client, fd, acc, u.logger) - if sessionId == "" { - sessionId = cookie.GetUniqueSessionId() - } - - userAddressString := accountInfo.GetAddress().Hex() - ui := &Info{ - name: userName, - sessionId: sessionId, - feedApi: fd, - account: acc, - file: file, - dir: dir, - pod: pod, - openPods: make(map[string]*p.Info), - openPodsMu: &sync.RWMutex{}, - } - - // set cookie and add user to map - err = u.addUserAndSessionToMap(ui) - if err != nil { // skipcq: TCV-001 - return "", "", nil, err - } - - return userAddressString, mnemonic, ui, nil -} - -// CreateNewUserV2 creates a new user with the given user name and password. if a mnemonic is passed -// then it is used instead of creating a new one. 
-func (u *Users) CreateNewUserV2(userName, passPhrase, mnemonic, sessionId string) (string, string, string, string, *Info, error) { +func (u *Users) CreateNewUserV2(userName, passPhrase, mnemonic, sessionId string, tm taskmanager.TaskManagerGO) (string, string, string, string, *Info, error) { // Check username validity if !isUserNameValid(userName) { return "", "", "", "", nil, ErrInvalidUserName @@ -108,9 +49,9 @@ func (u *Users) CreateNewUserV2(userName, passPhrase, mnemonic, sessionId string acc := account.New(u.logger) accountInfo := acc.GetUserAccountInfo() fd := feed.New(accountInfo, u.client, u.logger) - //create a new base user account with the mnemonic - mnemonic, _, err := acc.CreateUserAccount(passPhrase, mnemonic) - if err != nil { + // create a new base user account with the mnemonic + mnemonic, seed, err := acc.CreateUserAccount(mnemonic) + if err != nil { // skipcq: TCV-001 return "", "", "", "", nil, err } @@ -122,10 +63,6 @@ func (u *Users) CreateNewUserV2(userName, passPhrase, mnemonic, sessionId string } return "", "", "", "", nil, err // skipcq: TCV-001 } - seed, err := hdwallet.NewSeedFromMnemonic(mnemonic) - if err != nil { // skipcq: TCV-001 - return "", "", "", "", nil, err - } key, err := accountInfo.PadSeed(seed, passPhrase) if err != nil { // skipcq: TCV-001 return "", "", "", "", nil, err @@ -134,9 +71,9 @@ func (u *Users) CreateNewUserV2(userName, passPhrase, mnemonic, sessionId string return "", "", "", "", nil, err } // Instantiate pod, dir & file objects - file := f.NewFile(userName, u.client, fd, accountInfo.GetAddress(), u.logger) - dir := d.NewDirectory(userName, u.client, fd, accountInfo.GetAddress(), file, u.logger) - pod := p.NewPod(u.client, fd, acc, u.logger) + file := f.NewFile(userName, u.client, fd, accountInfo.GetAddress(), tm, u.logger) + dir := d.NewDirectory(userName, u.client, fd, accountInfo.GetAddress(), file, tm, u.logger) + pod := p.NewPod(u.client, fd, acc, tm, u.logger) if sessionId == "" { sessionId = cookie.GetUniqueSessionId() } diff --git a/pkg/user/new_test.go b/pkg/user/new_test.go index 05e37351..ab6c9893 100644 --- a/pkg/user/new_test.go +++ b/pkg/user/new_test.go @@ -20,6 +20,9 @@ import ( "errors" "io" "testing" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" @@ -30,13 +33,14 @@ import ( func TestNew(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) + tm := taskmanager.New(1, 10, time.Second*15, logger) t.Run("new-blank-username", func(t *testing.T) { ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := user.NewUsers("", mockClient, ens, logger) - _, _, _, _, _, err := userObject.CreateNewUserV2("", "password1", "", "") + _, _, _, _, _, err := userObject.CreateNewUserV2("", "password1", "", "", tm) if !errors.Is(err, user.ErrInvalidUserName) { t.Fatal(err) } @@ -45,14 +49,14 @@ func TestNew(t *testing.T) { t.Run("new-user", func(t *testing.T) { ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := user.NewUsers("", mockClient, ens, logger) - _, mnemonic, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "") + _, mnemonic, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "", tm) if err != nil { t.Fatal(err) } - _, _, _, _, _, err = userObject.CreateNewUserV2("user1", "password1", "", "") + _, _, _, _, _, err = userObject.CreateNewUserV2("user1", "password1", 
"", "", tm) if !errors.Is(err, user.ErrUserAlreadyPresent) { t.Fatal(err) } @@ -70,8 +74,8 @@ func TestNew(t *testing.T) { if ui.GetUserName() != "user1" { t.Fatalf("invalid user name") } - if ui.GetFeed() == nil || ui.GetAccount() == nil { - t.Fatalf("invalid feed or account") + if ui.GetFeed() == nil || ui.GetAccount() == nil || ui.GetPod() == nil { + t.Fatalf("invalid feed, account or pod") } err = ui.GetAccount().GetWallet().IsValidMnemonic(mnemonic) if err != nil { diff --git a/pkg/user/present.go b/pkg/user/present.go index 7ec1e414..ec67aa0d 100644 --- a/pkg/user/present.go +++ b/pkg/user/present.go @@ -16,12 +16,7 @@ limitations under the License. package user -// IsUsernameAvailable checks if a supplied user name is present in this dfs server. -func (u *Users) IsUsernameAvailable(userName, dataDir string) bool { - return u.isUserMappingPresent(userName, dataDir) -} - -// IsUsernameAvailableV2 checks if a supplied user name is present in blockchain +// IsUsernameAvailableV2 checks if a supplied username is present in blockchain func (u *Users) IsUsernameAvailableV2(userName string) bool { addr, err := u.ens.GetOwner(userName) if err != nil { // skipcq: TCV-001 diff --git a/pkg/user/sharing.go b/pkg/user/sharing.go index 00a976ef..a7c6c7fc 100644 --- a/pkg/user/sharing.go +++ b/pkg/user/sharing.go @@ -30,37 +30,66 @@ import ( ) type SharingEntry struct { - Meta *f.MetaData `json:"meta"` - Sender string `json:"source_address"` - Receiver string `json:"dest_address"` - SharedTime string `json:"shared_time"` + Meta *SharingMetaData `json:"meta"` + Sender string `json:"sourceAddress"` + Receiver string `json:"destAddress"` + SharedTime string `json:"sharedTime"` +} + +type SharingMetaData struct { + Version uint8 `json:"version"` + Path string `json:"filePath"` + Name string `json:"fileName"` + SharedPassword string `json:"sharedPassword"` + Size uint64 `json:"fileSize"` + BlockSize uint32 `json:"blockSize"` + ContentType string `json:"contentType"` + Compression string `json:"compression"` + CreationTime int64 `json:"creationTime"` + AccessTime int64 `json:"accessTime"` + ModificationTime int64 `json:"modificationTime"` + InodeAddress []byte `json:"fileInodeReference"` } type ReceiveFileInfo struct { FileName string `json:"name"` Size string `json:"size"` - BlockSize string `json:"block_size"` - NumberOfBlocks string `json:"number_of_blocks"` - ContentType string `json:"content_type"` + BlockSize string `json:"blockSize"` + NumberOfBlocks string `json:"numberOfBlocks"` + ContentType string `json:"contentType"` Compression string `json:"compression"` - PodName string `json:"pod_name"` - Sender string `json:"source_address"` - Receiver string `json:"dest_address"` - SharedTime string `json:"shared_time"` + Sender string `json:"sourceAddress"` + Receiver string `json:"destAddress"` + SharedTime string `json:"sharedTime"` } // ShareFileWithUser exports a file to another user by creating and uploading a new encrypted sharing file entry. 
-func (u *Users) ShareFileWithUser(podName, podFileWithPath, destinationRef string, userInfo *Info, pod *pod.Pod, userAddress utils.Address) (string, error) { +func (u *Users) ShareFileWithUser(podName, podPassword, podFileWithPath, destinationRef string, userInfo *Info, pod *pod.Pod, userAddress utils.Address) (string, error) { totalFilePath := utils.CombinePathAndFile(podFileWithPath, "") - meta, err := userInfo.file.GetMetaFromFileName(totalFilePath, userAddress) + meta, err := userInfo.file.GetMetaFromFileName(totalFilePath, podPassword, userAddress) if err != nil { // skipcq: TCV-001 return "", err } - // Create a outbox entry + sharingMeta := &SharingMetaData{ + Version: meta.Version, + Path: meta.Path, + Name: meta.Name, + SharedPassword: podPassword, + Size: meta.Size, + BlockSize: meta.BlockSize, + ContentType: meta.ContentType, + Compression: meta.Compression, + CreationTime: meta.CreationTime, + AccessTime: meta.AccessTime, + ModificationTime: meta.ModificationTime, + InodeAddress: meta.InodeAddress, + } + + // Create an outbox entry now := time.Now() sharingEntry := SharingEntry{ - Meta: meta, + Meta: sharingMeta, Sender: userAddress.String(), Receiver: destinationRef, SharedTime: strconv.FormatInt(now.Unix(), 10), @@ -72,7 +101,7 @@ func (u *Users) ShareFileWithUser(podName, podFileWithPath, destinationRef strin return "", err } - //encrypt data + // encrypt data encryptedData, err := encryptData(data, now.Unix()) if err != nil { // skipcq: TCV-001 return "", err @@ -118,7 +147,7 @@ func (u *Users) ReceiveFileFromUser(podName string, sharingRef utils.SharingRefe return "", pod.ErrPodNotOpened } - podInfo, err := pd.GetPodInfoFromPodMap(podName) + podInfo, _, err := pd.GetPodInfoFromPodMap(podName) if err != nil { // skipcq: TCV-001 return "", err } @@ -137,8 +166,6 @@ func (u *Users) ReceiveFileFromUser(podName string, sharingRef utils.SharingRefe now := time.Now().Unix() newMeta := f.MetaData{ Version: sharingEntry.Meta.Version, - UserAddress: podInfo.GetPodAddress(), - PodName: podName, Path: podDir, Name: fileNameToAdd, Size: sharingEntry.Meta.Size, @@ -152,11 +179,11 @@ func (u *Users) ReceiveFileFromUser(podName string, sharingRef utils.SharingRefe } file.AddToFileMap(totalPath, &newMeta) - err = file.PutMetaForFile(&newMeta) + err = file.PutMetaForFile(&newMeta, podInfo.GetPodPassword()) if err != nil { // skipcq: TCV-001 return "", err } - err = dir.AddEntryToDir(podDir, fileNameToAdd, true) + err = dir.AddEntryToDir(podDir, podInfo.GetPodPassword(), fileNameToAdd, true) if err != nil { // skipcq: TCV-001 return "", err } @@ -206,10 +233,14 @@ func (u *Users) ReceiveFileInfo(sharingRef utils.SharingReference) (*ReceiveFile if err != nil { // skipcq: TCV-001 return nil, err } - fileInodeBytes, respCode, err := u.client.DownloadBlob(sharingEntry.Meta.InodeAddress) + encryptedFileInodeBytes, respCode, err := u.client.DownloadBlob(sharingEntry.Meta.InodeAddress) if err != nil || respCode != http.StatusOK { // skipcq: TCV-001 return nil, err } + fileInodeBytes, err := utils.DecryptBytes([]byte(sharingEntry.Meta.SharedPassword), encryptedFileInodeBytes) + if err != nil { // skipcq: TCV-001 + return nil, err + } var fileInode f.INode err = json.Unmarshal(fileInodeBytes, &fileInode) if err != nil { // skipcq: TCV-001 @@ -223,7 +254,6 @@ func (u *Users) ReceiveFileInfo(sharingRef utils.SharingReference) (*ReceiveFile NumberOfBlocks: strconv.FormatInt(int64(len(fileInode.Blocks)), 10), ContentType: sharingEntry.Meta.ContentType, Compression: sharingEntry.Meta.Compression, - 
PodName: sharingEntry.Meta.PodName, Sender: sharingEntry.Sender, Receiver: sharingEntry.Receiver, SharedTime: sharingEntry.SharedTime, diff --git a/pkg/user/sharing_test.go b/pkg/user/sharing_test.go index 329e68c6..16d68728 100644 --- a/pkg/user/sharing_test.go +++ b/pkg/user/sharing_test.go @@ -20,10 +20,10 @@ import ( "crypto/rand" "errors" "io" - "io/ioutil" "os" "strconv" "testing" + "time" "github.com/fairdatasociety/fairOS-dfs/pkg/account" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" @@ -34,6 +34,7 @@ import ( "github.com/fairdatasociety/fairOS-dfs/pkg/pod" "github.com/fairdatasociety/fairOS-dfs/pkg/user" "github.com/fairdatasociety/fairOS-dfs/pkg/utils" + "github.com/plexsysio/taskmanager" ) func TestSharing(t *testing.T) { @@ -41,93 +42,95 @@ func TestSharing(t *testing.T) { logger := logging.New(io.Discard, 0) acc1 := account.New(logger) - _, _, err := acc1.CreateUserAccount("password", "") + _, _, err := acc1.CreateUserAccount("") if err != nil { t.Fatal(err) } - _, err = acc1.CreatePodAccount(1, "password", false) + _, err = acc1.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } + tm := taskmanager.New(1, 10, time.Second*15, logger) + fd1 := feed.New(acc1.GetUserAccountInfo(), mockClient, logger) - pod1 := pod.NewPod(mockClient, fd1, acc1, logger) + pod1 := pod.NewPod(mockClient, fd1, acc1, tm, logger) podName1 := "test1" acc2 := account.New(logger) - _, _, err = acc2.CreateUserAccount("password", "") + _, _, err = acc2.CreateUserAccount("") if err != nil { t.Fatal(err) } - _, err = acc2.CreatePodAccount(1, "password", false) + _, err = acc2.CreatePodAccount(1, false) if err != nil { t.Fatal(err) } fd2 := feed.New(acc2.GetUserAccountInfo(), mockClient, logger) - pod2 := pod.NewPod(mockClient, fd2, acc2, logger) + pod2 := pod.NewPod(mockClient, fd2, acc2, tm, logger) podName2 := "test2" t.Run("sharing-user", func(t *testing.T) { ens := mock2.NewMockNamespaceManager() - //create source user + // create source user userObject1 := user.NewUsers("", mockClient, ens, logger) - _, _, _, _, ui0, err := userObject1.CreateNewUserV2("user1", "password1", "", "") + _, _, _, _, ui0, err := userObject1.CreateNewUserV2("user1", "password1", "", "", tm) if err != nil { t.Fatal(err) } - + podPassword, _ := utils.GetRandString(pod.PodPasswordLength) // create source pod - info1, err := pod1.CreatePod(podName1, "password", "") + info1, err := pod1.CreatePod(podName1, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName1) } ui0.AddPodName(podName1, info1) // make root dir so that other directories can be added - err = info1.GetDirectory().MkRootDir("pod1", info1.GetPodAddress(), info1.GetFeed()) + err = info1.GetDirectory().MkRootDir("pod1", podPassword, info1.GetPodAddress(), info1.GetFeed()) if err != nil { t.Fatal(err) } // create dir and file dirObject1 := info1.GetDirectory() - err = dirObject1.MkDir("/parentDir1") + err = dirObject1.MkDir("/parentDir1", podPassword) if err != nil { t.Fatal(err) } fileObject1 := info1.GetFile() - _, err = uploadFile(t, fileObject1, "/parentDir1", "file1", "", 100, 10) + _, err = uploadFile(t, fileObject1, "/parentDir1", "file1", "", podPassword, 100, 10) if err != nil { t.Fatal(err) } - // share file with another user - sharingRefString, err := userObject1.ShareFileWithUser("pod1", "/parentDir1/file1", "user2", ui0, pod1, info1.GetPodAddress()) + sharingRefString, err := userObject1.ShareFileWithUser("pod1", podPassword, "/parentDir1/file1", "user2", ui0, pod1, info1.GetPodAddress()) if err != nil { 
t.Fatal(err) } - //create destination user + // create destination user userObject2 := user.NewUsers("", mockClient, ens, logger) - _, _, _, _, ui, err := userObject2.CreateNewUserV2("user2", "password2", "", "") + _, _, _, _, ui, err := userObject2.CreateNewUserV2("user2", "password2", "", "", tm) if err != nil { t.Fatal(err) } // create destination pod - info2, err := pod2.CreatePod(podName2, "password", "") + podPassword, _ = utils.GetRandString(pod.PodPasswordLength) + info2, err := pod2.CreatePod(podName2, "", podPassword) if err != nil { t.Fatalf("error creating pod %s", podName2) } // make root dir so that other directories can be added - err = info2.GetDirectory().MkRootDir("pod1", info2.GetPodAddress(), info2.GetFeed()) + err = info2.GetDirectory().MkRootDir("pod1", podPassword, info2.GetPodAddress(), info2.GetFeed()) if err != nil { t.Fatal(err) } // create dir and file dirObject2 := info2.GetDirectory() - err = dirObject2.MkDir("/parentDir2") + err = dirObject2.MkDir("/parentDir2", podPassword) if err != nil { t.Fatal(err) } @@ -149,9 +152,6 @@ func TestSharing(t *testing.T) { if receiveFileInfo.FileName != "file1" { t.Fatalf("invalid filename received") } - if receiveFileInfo.PodName != podName1 { - t.Fatalf("invalid podName received") - } if receiveFileInfo.Size != strconv.FormatUint(100, 10) { t.Fatalf("invalid file size received") } @@ -179,7 +179,7 @@ func TestSharing(t *testing.T) { if destinationFilePath != "/parentDir2/file1" { t.Fatalf("invalid destination file name") } - _, files, err := dirObject2.ListDir("/parentDir2") + _, files, err := dirObject2.ListDir("/parentDir2", podPassword) if err != nil { t.Fatal(err) } @@ -207,9 +207,9 @@ func TestSharing(t *testing.T) { }) } -func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compression string, fileSize int64, blockSize uint32) ([]byte, error) { +func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compression, podPassword string, fileSize int64, blockSize uint32) ([]byte, error) { // create a temp file - fd, err := ioutil.TempFile("", fileName) + fd, err := os.CreateTemp("", fileName) if err != nil { t.Fatal(err) } @@ -239,5 +239,5 @@ func uploadFile(t *testing.T, fileObject *file.File, filePath, fileName, compres } // upload the temp file - return content, fileObject.Upload(f1, fileName, fileSize, blockSize, filePath, compression) + return content, fileObject.Upload(f1, fileName, fileSize, blockSize, filePath, compression, podPassword) } diff --git a/pkg/user/stat.go b/pkg/user/stat.go index 151a8aae..a892ebc8 100644 --- a/pkg/user/stat.go +++ b/pkg/user/stat.go @@ -19,7 +19,7 @@ package user import "github.com/fairdatasociety/fairOS-dfs/pkg/account" type Stat struct { - Name string `json:"user_name"` + Name string `json:"userName"` Reference string `json:"address"` } diff --git a/pkg/user/stat_test.go b/pkg/user/stat_test.go index 2288afda..616c2a4a 100644 --- a/pkg/user/stat_test.go +++ b/pkg/user/stat_test.go @@ -20,6 +20,9 @@ import ( "errors" "io" "testing" + "time" + + "github.com/plexsysio/taskmanager" "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore/bee/mock" mock2 "github.com/fairdatasociety/fairOS-dfs/pkg/ensm/eth/mock" @@ -29,10 +32,11 @@ import ( func TestStat(t *testing.T) { mockClient := mock.NewMockBeeClient() logger := logging.New(io.Discard, 0) + tm := taskmanager.New(1, 10, time.Second*15, logger) t.Run("stat-nonexistent-user", func(t *testing.T) { ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := NewUsers("", 
mockClient, ens, logger) ui := &Info{ name: "user1123123", @@ -46,9 +50,9 @@ func TestStat(t *testing.T) { t.Run("stat-user", func(t *testing.T) { ens := mock2.NewMockNamespaceManager() - //create user + // create user userObject := NewUsers("", mockClient, ens, logger) - _, _, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "") + _, _, _, _, ui, err := userObject.CreateNewUserV2("user1", "password1", "", "", tm) if err != nil { t.Fatal(err) } diff --git a/pkg/user/store_mnemonic.go b/pkg/user/store_mnemonic.go deleted file mode 100644 index c6842d9c..00000000 --- a/pkg/user/store_mnemonic.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright © 2020 FairOS Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package user - -import ( - "github.com/fairdatasociety/fairOS-dfs/pkg/blockstore" - "github.com/fairdatasociety/fairOS-dfs/pkg/feed" - "github.com/fairdatasociety/fairOS-dfs/pkg/utils" -) - -func (*Users) uploadEncryptedMnemonic(userName string, address utils.Address, encryptedMnemonic string, fd *feed.API) error { - topic := utils.HashString(userName) - data := []byte(encryptedMnemonic) - _, err := fd.CreateFeed(topic, address, data) - return err -} - -func (*Users) getEncryptedMnemonic(userName string, address utils.Address, fd *feed.API) (string, error) { - topic := utils.HashString(userName) - _, data, err := fd.GetFeedData(topic, address) - if err != nil { // skipcq: TCV-001 - return "", err - } - return string(data), nil -} - -func (*Users) deleteMnemonic(userName string, address utils.Address, fd *feed.API, client blockstore.Client) error { - topic := utils.HashString(userName) - feedAddress, _, err := fd.GetFeedData(topic, address) - if err != nil { // skipcq: TCV-001 - return err - } - return client.DeleteReference(feedAddress) -} diff --git a/pkg/utils/encrypt.go b/pkg/utils/encrypt.go new file mode 100644 index 00000000..583cd896 --- /dev/null +++ b/pkg/utils/encrypt.go @@ -0,0 +1,57 @@ +package utils + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "errors" + "io" +) + +func EncryptBytes(passphrase, message []byte) ([]byte, error) { // skipcq: TCV-001 + aesKey := sha256.Sum256(passphrase) + block, err := aes.NewCipher(aesKey[:]) + if err != nil { // skipcq: TCV-001 + return nil, err + } + + // IV needs to be unique, but doesn't have to be secure. + // It's common to put it at the beginning of the ciphertext. 
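+	// The returned slice therefore has the layout: IV (aes.BlockSize bytes) followed by
+	// the CFB-encrypted message; DecryptBytes below expects exactly this layout.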
+ cipherText := make([]byte, aes.BlockSize+len(message)) + iv := cipherText[:aes.BlockSize] + if _, err = io.ReadFull(rand.Reader, iv); err != nil { // skipcq: TCV-001 + return nil, err + } + stream := cipher.NewCFBEncrypter(block, iv) + stream.XORKeyStream(cipherText[aes.BlockSize:], message) + + return cipherText, nil +} + +func DecryptBytes(passphrase, cipherText []byte) ([]byte, error) { + aesKey := sha256.Sum256(passphrase) + block, err := aes.NewCipher(aesKey[:]) + if err != nil { // skipcq: TCV-001 + return nil, err + } + + if len(cipherText) < aes.BlockSize { // skipcq: TCV-001 + err = errors.New("ciphertext block size is too short") + return nil, err + } + + temp := make([]byte, len(cipherText)) + copy(temp, cipherText) + + // IV needs to be unique, but doesn't have to be secure. + // It's common to put it at the beginning of the ciphertext. + iv := temp[:aes.BlockSize] + temp = temp[aes.BlockSize:] + + stream := cipher.NewCFBDecrypter(block, iv) + // XORKeyStream can work in-place if the two arguments are the same. + stream.XORKeyStream(temp, temp) + + return temp, nil +} diff --git a/pkg/utils/reference.go b/pkg/utils/reference.go index 9c4aa2d0..a9260bef 100644 --- a/pkg/utils/reference.go +++ b/pkg/utils/reference.go @@ -25,7 +25,7 @@ const ( // Reference is used for creating pod sharing references type Reference struct { - R []byte + R []byte `json:"swarm"` } // NewReference creates a Reference diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index e0dc7cb8..7888aeee 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -24,7 +24,6 @@ import ( "fmt" "hash" "math/big" - "os" "strconv" "strings" @@ -38,8 +37,8 @@ const ( // MaxChunkLength is the maximum size of a chunk MaxChunkLength = 4096 - // PathSeparator is string of os.PathSeparator - PathSeparator = string(os.PathSeparator) + // PathSeparator is string of unix filesystem + PathSeparator = "/" // MaxPodNameLength defines how long a pod name can be MaxPodNameLength = 64 @@ -179,8 +178,10 @@ func NewChunkWithoutSpan(data []byte) (swarm.Chunk, error) { // CombinePathAndFile joins filename with provided path func CombinePathAndFile(path, fileName string) string { + if path == PathSeparator && fileName == PathSeparator { + return PathSeparator + } var totalPath string - if path == PathSeparator || path == "" { fileName = strings.TrimPrefix(fileName, PathSeparator) totalPath = PathSeparator + fileName diff --git a/swagger/docs.go b/swagger/docs.go new file mode 100644 index 00000000..0b9b72ce --- /dev/null +++ b/swagger/docs.go @@ -0,0 +1,4039 @@ +// Package swagger GENERATED BY SWAG; DO NOT EDIT +// This file was generated by swaggo/swag +package swagger + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "contact": {}, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/v1/dir/ls": { + "get": { + "description": "DirectoryLsHandler is the api handler for listing the contents of a directory.", + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "List directory", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dir path", + "name": "dirPath", + "in": "query", + 
"required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.ListFileResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/mkdir": { + "post": { + "description": "DirectoryMkdirHandler is the api handler to create a new directory.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Create directory", + "parameters": [ + { + "description": "pod name and dir path", + "name": "dir_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DirRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/present": { + "get": { + "description": "DirectoryPresentHandler is the api handler which says if a directory is present or not", + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Is directory present", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dir path", + "name": "dirPath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DirPresentResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/rename": { + "post": { + "description": "DirectoryRenameHandler is the api handler to rename a directory.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Rename directory", + "parameters": [ + { + "description": "old name and new path", + "name": "dir_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.RenameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/rmdir": { + "delete": { + "description": "DirectoryRmdirHandler is the api handler to remove a directory.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + 
"dir" + ], + "summary": "Remove directory", + "parameters": [ + { + "description": "pod name and dir path", + "name": "dir_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DirRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/stat": { + "get": { + "description": "DirectoryStatHandler is the api handler which gives the information about a directory", + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Directory stat", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dir path", + "name": "dirPath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/dir.Stats" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/count": { + "post": { + "description": "DocCountHandler is the api handler to count the number of documents in a given document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Count number of document in a table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocCountRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/collection.TableKeyCount" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/delete": { + "delete": { + "description": "DocDeleteHandler is the api handler to delete the given document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Delete a doc table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.SimpleDocRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + 
} + } + } + } + }, + "/v1/doc/entry/delete": { + "delete": { + "description": "DocEntryDelHandler is the api handler to delete a document from a document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Delete a document from a document datastore", + "parameters": [ + { + "type": "string", + "name": "id", + "in": "query" + }, + { + "type": "string", + "name": "podName", + "in": "query" + }, + { + "type": "string", + "name": "tableName", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/entry/get": { + "get": { + "description": "DocEntryGetHandler is the api handler to get a document from a document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Get a document from a document datastore", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "id to search for", + "name": "id", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocGetResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/entry/put": { + "post": { + "description": "DocEntryPutHandler is the api handler add a document in to a document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Add a record in document datastore", + "parameters": [ + { + "type": "string", + "name": "doc", + "in": "query" + }, + { + "type": "string", + "name": "podName", + "in": "query" + }, + { + "type": "string", + "name": "tableName", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/find": { + "get": { + "description": "DocFindHandler is the api handler to select rows from a given document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Get rows from a given doc datastore", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": 
"query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "expression to search for", + "name": "expr", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "number od documents", + "name": "limit", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocFindResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/indexjson": { + "post": { + "description": "DocIndexJsonHandler is the api handler to index a json file that is present in a pod, in to the given document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Index a json file that is present in a pod, in to the given document database", + "parameters": [ + { + "description": "index request", + "name": "index_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocIndexRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/loadjson": { + "post": { + "description": "DocLoadJsonHandler is the api handler that indexes a json file that is present in the local file system", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Load json file from local file system", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "file", + "description": "json to index", + "name": "json", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/ls": { + "get": { + "description": "DocListHandler is the api handler which lists all the document database in a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "List all doc table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": 
"Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocumentDBs" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/new": { + "post": { + "description": "DocCreateHandler is the api handler to create a new document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Create in doc table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/open": { + "post": { + "description": "DocOpenHandler is the api handler to open a document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Open a doc table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocumentDBs" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/delete": { + "delete": { + "description": "FileReceiveHandler is the api handler to delete a file from a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Delete a file", + "parameters": [ + { + "description": "pod name and file path", + "name": "file_delete_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.FileDeleteRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/download": { + "get": { + "description": "FileDownloadHandlerGet is the api handler to download a file from a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "*/*" + ], + "tags": [ + "file" + ], + "summary": "Download a file", 
+ "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "file path", + "name": "filePath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + }, + "post": { + "description": "FileDownloadHandlerPost is the api handler to download a file from a given pod", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "*/*" + ], + "tags": [ + "file" + ], + "summary": "Download a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "file path", + "name": "filePath", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/receive": { + "get": { + "description": "FileReceiveHandler is the api handler to receive a file in a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Receive a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "file location", + "name": "dirPath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.FileSharingReference" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/receiveinfo": { + "get": { + "description": "FileReceiveInfoHandler is the api handler to receive a file info", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Receive a file info", + "parameters": [ + { + "type": "string", + "description": "sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/user.ReceiveFileInfo" + } + }, + "400": { + "description": "Bad 
Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/rename": { + "post": { + "description": "FileRenameHandler is the api handler to get the information of a file", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Info of a file", + "parameters": [ + { + "description": "old name \u0026 new name", + "name": "rename_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.RenameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/share": { + "post": { + "description": "FileShareHandler is the api handler to share a file from a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Share a file", + "parameters": [ + { + "description": "file share request params", + "name": "file_share_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.FileShareRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.FileSharingReference" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/stat": { + "get": { + "description": "FileStatHandler is the api handler to get the information of a file", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Info of a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "file path", + "name": "filePath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/file.Stats" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/upload": { + "post": { + "description": "FileUploadHandler is the api handler to upload a file from a local file system to the dfs", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Upload a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", 
+ "in": "formData", + "required": true + }, + { + "type": "string", + "description": "location", + "name": "dirPath", + "in": "formData", + "required": true + }, + { + "type": "string", + "example": "4Kb, 1Mb", + "description": "block size to break the file", + "name": "blockSize", + "in": "formData", + "required": true + }, + { + "type": "file", + "description": "file to upload", + "name": "files", + "in": "formData", + "required": true + }, + { + "type": "string", + "example": "snappy, gzip", + "description": "cookie parameter", + "name": "fairOS-dfs-Compression", + "in": "header" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/count": { + "post": { + "description": "KVCountHandler is the api handler to count the number of rows in a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Count rows in a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/collection.TableKeyCount" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/delete": { + "delete": { + "description": "KVDeleteHandler is the api handler to delete a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Delete a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/del": { + "delete": { + "description": "KVDelHandler is the api handler to delete a key and value from the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Delete key-value from the kv table", + "parameters": [ + { + "description": "delete request", + "name": "delete_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVEntryDeleteRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", 
+ "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponseRaw" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/get": { + "get": { + "description": "KVGetHandler is the api handler to get a value from the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "get value from the kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "key", + "name": "key", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/get-data": { + "get": { + "description": "KVGetDataHandler is the api handler to get raw value from the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "get value from the kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "key", + "name": "key", + "in": "query", + "required": true + }, + { + "type": "string", + "example": "byte-string, string", + "description": "format of the value", + "name": "format", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponseRaw" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/present": { + "get": { + "description": "KVPresentHandler is the api handler to check if a value exists in the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Check if a value exists in the kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "key", + "name": "key", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": 
{ + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/put": { + "post": { + "description": "KVPutHandler is the api handler to put a key-value in the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "put key and value in the kv table", + "parameters": [ + { + "description": "kv entry", + "name": "kv_entry", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVEntryRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/export": { + "post": { + "description": "KVExportHandler is the api handler to export from a particular key with the given prefix", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Export from a particular key with the given prefix", + "parameters": [ + { + "description": "kv export info", + "name": "export_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVExportRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/loadcsv": { + "post": { + "description": "KVLoadCSVHandler is the api handler to load a csv file as key and value in a KV table", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Upload a csv file in kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "keep in memory", + "name": "memory", + "in": "formData" + }, + { + "type": "file", + "description": "file to upload", + "name": "csv", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/ls": { + "get": { + 
"description": "KVListHandler is the api handler to list all the key value tables in a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "List all key value tables", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.Collections" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/new": { + "post": { + "description": "KVCreateHandler is the api handler to create a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Create a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/open": { + "post": { + "description": "KVOpenHandler is the api handler to open a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Open a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/seek": { + "post": { + "description": "KVSeekHandler is the api handler to seek to a particular key with the given prefix", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Seek in kv table", + "parameters": [ + { + "description": "kv seek info", + "name": "export_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVExportRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": 
"Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/seek/next": { + "post": { + "description": "KVGetNextHandler is the api handler to get the key and value from the current seek position", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Get next value from last seek in kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponse" + } + }, + "204": { + "description": "No Content", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/close": { + "post": { + "description": "PodCloseHandler is the api handler to close an open pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Close pod", + "parameters": [ + { + "description": "pod name", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/delete": { + "delete": { + "description": "PodDeleteHandler is the api handler to delete a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Delete pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/ls": { + "get": { + "description": "PodListHandler is the api handler to list all pods", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "List pods", + "parameters": [ + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PodListResponse" + } + }, + 
"400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/new": { + "post": { + "description": "PodCreateHandler is the api handler to create a new pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Create pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/open": { + "post": { + "description": "PodOpenHandler is the api handler to open pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Open pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/open-async": { + "post": { + "description": "PodOpenAsyncHandler is the api handler to open pod asynchronously", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Open pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/present": { + "get": { + "description": "PodPresentHandler is the api handler to check if a pod is present", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Is pod present", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + 
"schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/receive": { + "get": { + "description": "PodReceiveHandler is the api handler to receive shared pod from shared reference", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Receive shared pod", + "parameters": [ + { + "type": "string", + "description": "pod sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "pod name to be saved as", + "name": "sharedPodName", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/receiveinfo": { + "get": { + "description": "PodReceiveInfoHandler is the api handler to receive shared pod info from shared reference", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Receive shared pod info", + "parameters": [ + { + "type": "string", + "description": "pod sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/pod.ShareInfo" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/share": { + "post": { + "description": "PodShareHandler is the api handler to share a pod to the public", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Share pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.PodShareRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PodSharingReference" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/stat": { + "get": { + "description": "PodStatHandler is the api handler get information about a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Stats for pod", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + 
"type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PodStatResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/sync": { + "post": { + "description": "PodSyncHandler is the api handler to sync a pod's content", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Sync pod", + "parameters": [ + { + "description": "pod name", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/user/delete": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/export": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/isloggedin": { + "get": { + "description": "Check if the given user is logged-in", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Is user logged-in", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "userName", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.LoginStatus" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/user/login": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/logout": { + "post": { + "description": "logs-out user", + "consumes": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Logout", + "parameters": [ + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/user/present": { + "get": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/signup": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/stat": { + "get": { + "description": "show user stats", + "consumes": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "User stat", + "parameters": [ + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + 
"$ref": "#/definitions/user.Stat" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v2/user/delete": { + "delete": { + "description": "deletes user info from swarm", + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Delete user for ENS based authentication", + "parameters": [ + { + "description": "user delete request", + "name": "UserDeleteRequest", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.UserDeleteRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v2/user/login": { + "post": { + "description": "login user with the new ENS based authentication", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Login User", + "parameters": [ + { + "description": "user name", + "name": "user_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.UserLoginRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.UserLoginResponse" + }, + "headers": { + "Set-Cookie": { + "type": "string", + "description": "fairos-dfs session" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v2/user/present": { + "get": { + "description": "checks if the new user is present in the new ENS based authentication", + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Check if user is present", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "userName", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PresentResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v2/user/signup": { + "post": { + "description": "registers new user with the new ENS based authentication", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Register New User", + "parameters": [ + { + "description": "user name", + "name": "user_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.UserSignupRequest" + } + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.UserSignupResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "402": { + "description": "Payment Required", + "schema": { + "$ref": "#/definitions/api.UserSignupResponse" + } + 
}, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + } + }, + "definitions": { + "api.Collection": { + "type": "object", + "properties": { + "indexes": { + "type": "array", + "items": { + "type": "string" + } + }, + "tableName": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "api.Collections": { + "type": "object", + "properties": { + "tables": { + "type": "array", + "items": { + "$ref": "#/definitions/api.Collection" + } + } + } + }, + "api.DirPresentResponse": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "present": { + "type": "boolean" + } + } + }, + "api.DirRequest": { + "type": "object", + "properties": { + "dirPath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.DocCountRequest": { + "type": "object", + "properties": { + "expr": { + "type": "string" + }, + "mutable": { + "type": "boolean" + }, + "podName": { + "type": "string" + }, + "si": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.DocFindResponse": { + "type": "object", + "properties": { + "docs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + } + } + }, + "api.DocGetResponse": { + "type": "object", + "properties": { + "doc": { + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "api.DocIndexRequest": { + "type": "object", + "properties": { + "fileName": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.DocRequest": { + "type": "object", + "properties": { + "mutable": { + "type": "boolean" + }, + "podName": { + "type": "string" + }, + "si": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.DocumentDBs": { + "type": "object", + "properties": { + "tables": { + "type": "array", + "items": { + "$ref": "#/definitions/api.documentDB" + } + } + } + }, + "api.FileDeleteRequest": { + "type": "object", + "properties": { + "filePath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.FileShareRequest": { + "type": "object", + "properties": { + "destUser": { + "type": "string" + }, + "filePath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.FileSharingReference": { + "type": "object", + "properties": { + "fileSharingReference": { + "type": "string" + } + } + }, + "api.KVEntryDeleteRequest": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.KVEntryRequest": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "api.KVExportRequest": { + "type": "object", + "properties": { + "endPrefix": { + "type": "string" + }, + "limit": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "startPrefix": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.KVResponse": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } + }, + "values": { + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "api.KVResponseRaw": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } + }, + "values": { + "type": 
"string" + } + } + }, + "api.KVTableRequest": { + "type": "object", + "properties": { + "indexType": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.ListFileResponse": { + "type": "object", + "properties": { + "dirs": { + "type": "array", + "items": { + "$ref": "#/definitions/dir.Entry" + } + }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/file.Entry" + } + } + } + }, + "api.LoginStatus": { + "type": "object", + "properties": { + "loggedin": { + "type": "boolean" + } + } + }, + "api.PodListResponse": { + "type": "object", + "properties": { + "podName": { + "type": "array", + "items": { + "type": "string" + } + }, + "sharedPodName": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "api.PodNameRequest": { + "type": "object", + "properties": { + "podName": { + "type": "string" + } + } + }, + "api.PodSharingReference": { + "type": "object", + "properties": { + "podSharingReference": { + "type": "string" + } + } + }, + "api.PodStatResponse": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.PresentResponse": { + "type": "object", + "properties": { + "present": { + "type": "boolean" + } + } + }, + "api.SimpleDocRequest": { + "type": "object", + "properties": { + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.UserDeleteRequest": { + "type": "object", + "properties": { + "password": { + "type": "string" + } + } + }, + "api.UserLoginResponse": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "message": { + "type": "string" + }, + "nameHash": { + "type": "string" + }, + "publicKey": { + "type": "string" + } + } + }, + "api.UserSignupResponse": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "message": { + "type": "string" + }, + "mnemonic": { + "type": "string" + }, + "nameHash": { + "type": "string" + }, + "publicKey": { + "type": "string" + } + } + }, + "api.documentDB": { + "type": "object", + "properties": { + "indexes": { + "type": "array", + "items": { + "$ref": "#/definitions/collection.SIndex" + } + }, + "tableName": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "api.response": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + }, + "collection.SIndex": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "integer" + } + } + }, + "collection.TableKeyCount": { + "type": "object", + "properties": { + "count": { + "type": "integer" + }, + "table_name": { + "type": "string" + } + } + }, + "common.PodShareRequest": { + "type": "object", + "properties": { + "podName": { + "type": "string" + }, + "sharedPodName": { + "type": "string" + } + } + }, + "common.RenameRequest": { + "type": "object", + "properties": { + "newPath": { + "type": "string" + }, + "oldPath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "common.UserLoginRequest": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "userName": { + "type": "string" + } + } + }, + "common.UserSignupRequest": { + "type": "object", + "properties": { + "mnemonic": { + "type": "string" + }, + "password": { + "type": "string" + }, + "userName": { + "type": "string" + } + } + }, + "dir.Entry": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + 
"blockSize": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "name": { + "type": "string" + }, + "size": { + "type": "string" + } + } + }, + "dir.Stats": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "dirName": { + "type": "string" + }, + "dirPath": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "noOfDirectories": { + "type": "string" + }, + "noOfFiles": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "file.Blocks": { + "type": "object", + "properties": { + "compressedSize": { + "type": "string" + }, + "reference": { + "type": "string" + }, + "size": { + "type": "string" + } + } + }, + "file.Entry": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + "blockSize": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "name": { + "type": "string" + }, + "size": { + "type": "string" + } + } + }, + "file.Stats": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + "blockSize": { + "type": "string" + }, + "blocks": { + "type": "array", + "items": { + "$ref": "#/definitions/file.Blocks" + } + }, + "compression": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "fileName": { + "type": "string" + }, + "filePath": { + "type": "string" + }, + "fileSize": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "pod.ShareInfo": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "podAddress": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "userAddress": { + "type": "string" + } + } + }, + "user.ReceiveFileInfo": { + "type": "object", + "properties": { + "blockSize": { + "type": "string" + }, + "compression": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "destAddress": { + "type": "string" + }, + "name": { + "type": "string" + }, + "numberOfBlocks": { + "type": "string" + }, + "sharedTime": { + "type": "string" + }, + "size": { + "type": "string" + }, + "sourceAddress": { + "type": "string" + } + } + }, + "user.Stat": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "userName": { + "type": "string" + } + } + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "v0.9.0-rc1", + Host: "http://localhost:9090", + BasePath: "", + Schemes: []string{}, + Title: "FairOS-dfs server", + Description: "A list of the currently provided Interfaces to interact with FairOS decentralised file system(dfs), implementing user, pod, file system, key value store and document store", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/swagger/swagger.json b/swagger/swagger.json new file mode 100644 index 00000000..5ed48da2 --- /dev/null +++ b/swagger/swagger.json @@ -0,0 +1,4018 @@ +{ + "swagger": "2.0", + "info": { + "description": "A list of the currently provided Interfaces to interact with FairOS decentralised file system(dfs), implementing user, pod, file system, key value store and document 
store", + "title": "FairOS-dfs server", + "contact": { + "name": "Sabyasachi Patra", + "email": "sabyasachi@datafund.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "v0.9.0-rc1" + }, + "host": "http://localhost:9090", + "paths": { + "/v1/dir/ls": { + "get": { + "description": "DirectoryLsHandler is the api handler for listing the contents of a directory.", + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "List directory", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dir path", + "name": "dirPath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.ListFileResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/mkdir": { + "post": { + "description": "DirectoryMkdirHandler is the api handler to create a new directory.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Create directory", + "parameters": [ + { + "description": "pod name and dir path", + "name": "dir_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DirRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/present": { + "get": { + "description": "DirectoryPresentHandler is the api handler which says if a directory is present or not", + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Is directory present", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dir path", + "name": "dirPath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DirPresentResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/rename": { + "post": { + "description": "DirectoryRenameHandler is the api handler to rename a directory.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Rename directory", + "parameters": [ + { + "description": "old name and new path", + "name": "dir_request", + "in": "body", + "required": true, + 
"schema": { + "$ref": "#/definitions/common.RenameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/rmdir": { + "delete": { + "description": "DirectoryRmdirHandler is the api handler to remove a directory.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Remove directory", + "parameters": [ + { + "description": "pod name and dir path", + "name": "dir_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DirRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/dir/stat": { + "get": { + "description": "DirectoryStatHandler is the api handler which gives the information about a directory", + "produces": [ + "application/json" + ], + "tags": [ + "dir" + ], + "summary": "Directory stat", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dir path", + "name": "dirPath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/dir.Stats" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/count": { + "post": { + "description": "DocCountHandler is the api handler to count the number of documents in a given document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Count number of document in a table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocCountRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/collection.TableKeyCount" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/delete": { + "delete": { + "description": "DocDeleteHandler is the api handler to delete the given document database", + "consumes": [ + 
"application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Delete a doc table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.SimpleDocRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/entry/delete": { + "delete": { + "description": "DocEntryDelHandler is the api handler to delete a document from a document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Delete a document from a document datastore", + "parameters": [ + { + "type": "string", + "name": "id", + "in": "query" + }, + { + "type": "string", + "name": "podName", + "in": "query" + }, + { + "type": "string", + "name": "tableName", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/entry/get": { + "get": { + "description": "DocEntryGetHandler is the api handler to get a document from a document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Get a document from a document datastore", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "id to search for", + "name": "id", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocGetResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/entry/put": { + "post": { + "description": "DocEntryPutHandler is the api handler add a document in to a document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Add a record in document datastore", + "parameters": [ + { + "type": "string", + "name": "doc", + "in": "query" + }, + { + "type": "string", + "name": "podName", + "in": "query" + }, + { + "type": "string", + "name": "tableName", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + 
"required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/find": { + "get": { + "description": "DocFindHandler is the api handler to select rows from a given document datastore", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Get rows from a given doc datastore", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "expression to search for", + "name": "expr", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "number od documents", + "name": "limit", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocFindResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/indexjson": { + "post": { + "description": "DocIndexJsonHandler is the api handler to index a json file that is present in a pod, in to the given document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Index a json file that is present in a pod, in to the given document database", + "parameters": [ + { + "description": "index request", + "name": "index_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocIndexRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/loadjson": { + "post": { + "description": "DocLoadJsonHandler is the api handler that indexes a json file that is present in the local file system", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Load json file from local file system", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "file", + "description": "json to index", + "name": "json", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { 
+ "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/ls": { + "get": { + "description": "DocListHandler is the api handler which lists all the document database in a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "List all doc table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocumentDBs" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/new": { + "post": { + "description": "DocCreateHandler is the api handler to create a new document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Create in doc table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/doc/open": { + "post": { + "description": "DocOpenHandler is the api handler to open a document database", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "doc" + ], + "summary": "Open a doc table", + "parameters": [ + { + "description": "doc table info", + "name": "doc_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.DocRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.DocumentDBs" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/delete": { + "delete": { + "description": "FileReceiveHandler is the api handler to delete a file from a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Delete a file", + "parameters": [ + { + "description": "pod name and file path", + "name": "file_delete_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.FileDeleteRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", 
+ "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/download": { + "get": { + "description": "FileDownloadHandlerGet is the api handler to download a file from a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "*/*" + ], + "tags": [ + "file" + ], + "summary": "Download a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "file path", + "name": "filePath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + }, + "post": { + "description": "FileDownloadHandlerPost is the api handler to download a file from a given pod", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "*/*" + ], + "tags": [ + "file" + ], + "summary": "Download a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "file path", + "name": "filePath", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/receive": { + "get": { + "description": "FileReceiveHandler is the api handler to receive a file in a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Receive a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "file location", + "name": "dirPath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.FileSharingReference" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + 
"$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/receiveinfo": { + "get": { + "description": "FileReceiveInfoHandler is the api handler to receive a file info", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Receive a file info", + "parameters": [ + { + "type": "string", + "description": "sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/user.ReceiveFileInfo" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/rename": { + "post": { + "description": "FileRenameHandler is the api handler to get the information of a file", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Info of a file", + "parameters": [ + { + "description": "old name \u0026 new name", + "name": "rename_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.RenameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/share": { + "post": { + "description": "FileShareHandler is the api handler to share a file from a given pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Share a file", + "parameters": [ + { + "description": "file share request params", + "name": "file_share_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.FileShareRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.FileSharingReference" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/stat": { + "get": { + "description": "FileStatHandler is the api handler to get the information of a file", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Info of a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "file path", + "name": "filePath", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": 
"header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/file.Stats" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/file/upload": { + "post": { + "description": "FileUploadHandler is the api handler to upload a file from a local file system to the dfs", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "tags": [ + "file" + ], + "summary": "Upload a file", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "location", + "name": "dirPath", + "in": "formData", + "required": true + }, + { + "type": "string", + "example": "4Kb, 1Mb", + "description": "block size to break the file", + "name": "blockSize", + "in": "formData", + "required": true + }, + { + "type": "file", + "description": "file to upload", + "name": "files", + "in": "formData", + "required": true + }, + { + "type": "string", + "example": "snappy, gzip", + "description": "cookie parameter", + "name": "fairOS-dfs-Compression", + "in": "header" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/count": { + "post": { + "description": "KVCountHandler is the api handler to count the number of rows in a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Count rows in a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/collection.TableKeyCount" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/delete": { + "delete": { + "description": "KVDeleteHandler is the api handler to delete a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Delete a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + 
"$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/del": { + "delete": { + "description": "KVDelHandler is the api handler to delete a key and value from the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Delete key-value from the kv table", + "parameters": [ + { + "description": "delete request", + "name": "delete_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVEntryDeleteRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponseRaw" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/get": { + "get": { + "description": "KVGetHandler is the api handler to get a value from the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "get value from the kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "key", + "name": "key", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/get-data": { + "get": { + "description": "KVGetDataHandler is the api handler to get raw value from the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "get value from the kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "key", + "name": "key", + "in": "query", + "required": true + }, + { + "type": "string", + "example": "byte-string, string", + "description": "format of the value", + "name": "format", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponseRaw" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/present": { + "get": { + 
"description": "KVPresentHandler is the api handler to check if a value exists in the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Check if a value exists in the kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "key", + "name": "key", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/entry/put": { + "post": { + "description": "KVPutHandler is the api handler to put a key-value in the kv table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "put key and value in the kv table", + "parameters": [ + { + "description": "kv entry", + "name": "kv_entry", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVEntryRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/export": { + "post": { + "description": "KVExportHandler is the api handler to export from a particular key with the given prefix", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Export from a particular key with the given prefix", + "parameters": [ + { + "description": "kv export info", + "name": "export_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVExportRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/loadcsv": { + "post": { + "description": "KVLoadCSVHandler is the api handler to load a csv file as key and value in a KV table", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Upload a csv file in kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": 
"tableName", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "keep in memory", + "name": "memory", + "in": "formData" + }, + { + "type": "file", + "description": "file to upload", + "name": "csv", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/ls": { + "get": { + "description": "KVListHandler is the api handler to list all the key value tables in a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "List all key value tables", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.Collections" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/new": { + "post": { + "description": "KVCreateHandler is the api handler to create a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Create a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/open": { + "post": { + "description": "KVOpenHandler is the api handler to open a key value table", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Open a key value table", + "parameters": [ + { + "description": "kv table request", + "name": "kv_table_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVTableRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/seek": { + "post": { + "description": "KVSeekHandler 
is the api handler to seek to a particular key with the given prefix", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Seek in kv table", + "parameters": [ + { + "description": "kv seek info", + "name": "export_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.KVExportRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/kv/seek/next": { + "post": { + "description": "KVGetNextHandler is the api handler to get the key and value from the current seek position", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "kv" + ], + "summary": "Get next value from last seek in kv table", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "table name", + "name": "tableName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.KVResponse" + } + }, + "204": { + "description": "No Content", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/close": { + "post": { + "description": "PodCloseHandler is the api handler to close an open pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Close pod", + "parameters": [ + { + "description": "pod name", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/delete": { + "delete": { + "description": "PodDeleteHandler is the api handler to delete a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Delete pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + 
"$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/ls": { + "get": { + "description": "PodListHandler is the api handler to list all pods", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "List pods", + "parameters": [ + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PodListResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/new": { + "post": { + "description": "PodCreateHandler is the api handler to create a new pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Create pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/open": { + "post": { + "description": "PodOpenHandler is the api handler to open pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Open pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/open-async": { + "post": { + "description": "PodOpenAsyncHandler is the api handler to open pod asynchronously", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Open pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": 
"Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/present": { + "get": { + "description": "PodPresentHandler is the api handler to check if a pod is present", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Is pod present", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/receive": { + "get": { + "description": "PodReceiveHandler is the api handler to receive shared pod from shared reference", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Receive shared pod", + "parameters": [ + { + "type": "string", + "description": "pod sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "pod name to be saved as", + "name": "sharedPodName", + "in": "query" + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/receiveinfo": { + "get": { + "description": "PodReceiveInfoHandler is the api handler to receive shared pod info from shared reference", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Receive shared pod info", + "parameters": [ + { + "type": "string", + "description": "pod sharing reference", + "name": "sharingRef", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/pod.ShareInfo" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/share": { + "post": { + "description": "PodShareHandler is the api handler to share a pod to the public", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Share pod", + "parameters": [ + { + "description": "pod name and user password", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.PodShareRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": 
"header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PodSharingReference" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/stat": { + "get": { + "description": "PodStatHandler is the api handler get information about a pod", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Stats for pod", + "parameters": [ + { + "type": "string", + "description": "pod name", + "name": "podName", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PodStatResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/pod/sync": { + "post": { + "description": "PodSyncHandler is the api handler to sync a pod's content", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "pod" + ], + "summary": "Sync pod", + "parameters": [ + { + "description": "pod name", + "name": "pod_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.PodNameRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/user/delete": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/export": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/isloggedin": { + "get": { + "description": "Check if the given user is logged-in", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Is user logged-in", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "userName", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.LoginStatus" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/user/login": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/logout": { + "post": { + "description": "logs-out user", + "consumes": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Logout", + "parameters": [ + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", 
+ "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v1/user/present": { + "get": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/signup": { + "post": { + "tags": [ + "user" + ], + "deprecated": true, + "responses": {} + } + }, + "/v1/user/stat": { + "get": { + "description": "show user stats", + "consumes": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "User stat", + "parameters": [ + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/user.Stat" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v2/user/delete": { + "delete": { + "description": "deletes user info from swarm", + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Delete user for ENS based authentication", + "parameters": [ + { + "description": "user delete request", + "name": "UserDeleteRequest", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.UserDeleteRequest" + } + }, + { + "type": "string", + "description": "cookie parameter", + "name": "Cookie", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v2/user/login": { + "post": { + "description": "login user with the new ENS based authentication", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Login User", + "parameters": [ + { + "description": "user name", + "name": "user_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.UserLoginRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.UserLoginResponse" + }, + "headers": { + "Set-Cookie": { + "type": "string", + "description": "fairos-dfs session" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + "/v2/user/present": { + "get": { + "description": "checks if the new user is present in the new ENS based authentication", + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Check if user is present", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "userName", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/api.PresentResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + }, + 
"/v2/user/signup": { + "post": { + "description": "registers new user with the new ENS based authentication", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "user" + ], + "summary": "Register New User", + "parameters": [ + { + "description": "user name", + "name": "user_request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/common.UserSignupRequest" + } + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/api.UserSignupResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.response" + } + }, + "402": { + "description": "Payment Required", + "schema": { + "$ref": "#/definitions/api.UserSignupResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.response" + } + } + } + } + } + }, + "definitions": { + "api.Collection": { + "type": "object", + "properties": { + "indexes": { + "type": "array", + "items": { + "type": "string" + } + }, + "tableName": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "api.Collections": { + "type": "object", + "properties": { + "tables": { + "type": "array", + "items": { + "$ref": "#/definitions/api.Collection" + } + } + } + }, + "api.DirPresentResponse": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "present": { + "type": "boolean" + } + } + }, + "api.DirRequest": { + "type": "object", + "properties": { + "dirPath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.DocCountRequest": { + "type": "object", + "properties": { + "expr": { + "type": "string" + }, + "mutable": { + "type": "boolean" + }, + "podName": { + "type": "string" + }, + "si": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.DocFindResponse": { + "type": "object", + "properties": { + "docs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + } + } + }, + "api.DocGetResponse": { + "type": "object", + "properties": { + "doc": { + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "api.DocIndexRequest": { + "type": "object", + "properties": { + "fileName": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.DocRequest": { + "type": "object", + "properties": { + "mutable": { + "type": "boolean" + }, + "podName": { + "type": "string" + }, + "si": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.DocumentDBs": { + "type": "object", + "properties": { + "tables": { + "type": "array", + "items": { + "$ref": "#/definitions/api.documentDB" + } + } + } + }, + "api.FileDeleteRequest": { + "type": "object", + "properties": { + "filePath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.FileShareRequest": { + "type": "object", + "properties": { + "destUser": { + "type": "string" + }, + "filePath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.FileSharingReference": { + "type": "object", + "properties": { + "fileSharingReference": { + "type": "string" + } + } + }, + "api.KVEntryDeleteRequest": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.KVEntryRequest": { + "type": "object", + "properties": { + "key": { + 
"type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "api.KVExportRequest": { + "type": "object", + "properties": { + "endPrefix": { + "type": "string" + }, + "limit": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "startPrefix": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.KVResponse": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } + }, + "values": { + "type": "array", + "items": { + "type": "integer" + } + } + } + }, + "api.KVResponseRaw": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } + }, + "values": { + "type": "string" + } + } + }, + "api.KVTableRequest": { + "type": "object", + "properties": { + "indexType": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.ListFileResponse": { + "type": "object", + "properties": { + "dirs": { + "type": "array", + "items": { + "$ref": "#/definitions/dir.Entry" + } + }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/file.Entry" + } + } + } + }, + "api.LoginStatus": { + "type": "object", + "properties": { + "loggedin": { + "type": "boolean" + } + } + }, + "api.PodListResponse": { + "type": "object", + "properties": { + "podName": { + "type": "array", + "items": { + "type": "string" + } + }, + "sharedPodName": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "api.PodNameRequest": { + "type": "object", + "properties": { + "podName": { + "type": "string" + } + } + }, + "api.PodSharingReference": { + "type": "object", + "properties": { + "podSharingReference": { + "type": "string" + } + } + }, + "api.PodStatResponse": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "api.PresentResponse": { + "type": "object", + "properties": { + "present": { + "type": "boolean" + } + } + }, + "api.SimpleDocRequest": { + "type": "object", + "properties": { + "podName": { + "type": "string" + }, + "tableName": { + "type": "string" + } + } + }, + "api.UserDeleteRequest": { + "type": "object", + "properties": { + "password": { + "type": "string" + } + } + }, + "api.UserLoginResponse": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "message": { + "type": "string" + }, + "nameHash": { + "type": "string" + }, + "publicKey": { + "type": "string" + } + } + }, + "api.UserSignupResponse": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "message": { + "type": "string" + }, + "mnemonic": { + "type": "string" + }, + "nameHash": { + "type": "string" + }, + "publicKey": { + "type": "string" + } + } + }, + "api.documentDB": { + "type": "object", + "properties": { + "indexes": { + "type": "array", + "items": { + "$ref": "#/definitions/collection.SIndex" + } + }, + "tableName": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "api.response": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + }, + "collection.SIndex": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "integer" + } + } + }, + "collection.TableKeyCount": { + "type": "object", + "properties": { + "count": { + "type": "integer" + }, + "table_name": { + "type": "string" + } + } + }, + 
"common.PodShareRequest": { + "type": "object", + "properties": { + "podName": { + "type": "string" + }, + "sharedPodName": { + "type": "string" + } + } + }, + "common.RenameRequest": { + "type": "object", + "properties": { + "newPath": { + "type": "string" + }, + "oldPath": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "common.UserLoginRequest": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "userName": { + "type": "string" + } + } + }, + "common.UserSignupRequest": { + "type": "object", + "properties": { + "mnemonic": { + "type": "string" + }, + "password": { + "type": "string" + }, + "userName": { + "type": "string" + } + } + }, + "dir.Entry": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + "blockSize": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "name": { + "type": "string" + }, + "size": { + "type": "string" + } + } + }, + "dir.Stats": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "dirName": { + "type": "string" + }, + "dirPath": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "noOfDirectories": { + "type": "string" + }, + "noOfFiles": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "file.Blocks": { + "type": "object", + "properties": { + "compressedSize": { + "type": "string" + }, + "reference": { + "type": "string" + }, + "size": { + "type": "string" + } + } + }, + "file.Entry": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + "blockSize": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "name": { + "type": "string" + }, + "size": { + "type": "string" + } + } + }, + "file.Stats": { + "type": "object", + "properties": { + "accessTime": { + "type": "string" + }, + "blockSize": { + "type": "string" + }, + "blocks": { + "type": "array", + "items": { + "$ref": "#/definitions/file.Blocks" + } + }, + "compression": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "creationTime": { + "type": "string" + }, + "fileName": { + "type": "string" + }, + "filePath": { + "type": "string" + }, + "fileSize": { + "type": "string" + }, + "modificationTime": { + "type": "string" + }, + "podName": { + "type": "string" + } + } + }, + "pod.ShareInfo": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "podAddress": { + "type": "string" + }, + "podName": { + "type": "string" + }, + "userAddress": { + "type": "string" + } + } + }, + "user.ReceiveFileInfo": { + "type": "object", + "properties": { + "blockSize": { + "type": "string" + }, + "compression": { + "type": "string" + }, + "contentType": { + "type": "string" + }, + "destAddress": { + "type": "string" + }, + "name": { + "type": "string" + }, + "numberOfBlocks": { + "type": "string" + }, + "sharedTime": { + "type": "string" + }, + "size": { + "type": "string" + }, + "sourceAddress": { + "type": "string" + } + } + }, + "user.Stat": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "userName": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/swagger/swagger.yaml b/swagger/swagger.yaml new file mode 100644 index 00000000..1470438c --- /dev/null +++ 
b/swagger/swagger.yaml @@ -0,0 +1,2691 @@ +definitions: + api.Collection: + properties: + indexes: + items: + type: string + type: array + tableName: + type: string + type: + type: string + type: object + api.Collections: + properties: + tables: + items: + $ref: '#/definitions/api.Collection' + type: array + type: object + api.DirPresentResponse: + properties: + error: + type: string + present: + type: boolean + type: object + api.DirRequest: + properties: + dirPath: + type: string + podName: + type: string + type: object + api.DocCountRequest: + properties: + expr: + type: string + mutable: + type: boolean + podName: + type: string + si: + type: string + tableName: + type: string + type: object + api.DocFindResponse: + properties: + docs: + items: + items: + type: integer + type: array + type: array + type: object + api.DocGetResponse: + properties: + doc: + items: + type: integer + type: array + type: object + api.DocIndexRequest: + properties: + fileName: + type: string + podName: + type: string + tableName: + type: string + type: object + api.DocRequest: + properties: + mutable: + type: boolean + podName: + type: string + si: + type: string + tableName: + type: string + type: object + api.DocumentDBs: + properties: + tables: + items: + $ref: '#/definitions/api.documentDB' + type: array + type: object + api.FileDeleteRequest: + properties: + filePath: + type: string + podName: + type: string + type: object + api.FileShareRequest: + properties: + destUser: + type: string + filePath: + type: string + podName: + type: string + type: object + api.FileSharingReference: + properties: + fileSharingReference: + type: string + type: object + api.KVEntryDeleteRequest: + properties: + key: + type: string + podName: + type: string + tableName: + type: string + type: object + api.KVEntryRequest: + properties: + key: + type: string + podName: + type: string + tableName: + type: string + value: + type: string + type: object + api.KVExportRequest: + properties: + endPrefix: + type: string + limit: + type: string + podName: + type: string + startPrefix: + type: string + tableName: + type: string + type: object + api.KVResponse: + properties: + keys: + items: + type: string + type: array + values: + items: + type: integer + type: array + type: object + api.KVResponseRaw: + properties: + keys: + items: + type: string + type: array + values: + type: string + type: object + api.KVTableRequest: + properties: + indexType: + type: string + podName: + type: string + tableName: + type: string + type: object + api.ListFileResponse: + properties: + dirs: + items: + $ref: '#/definitions/dir.Entry' + type: array + files: + items: + $ref: '#/definitions/file.Entry' + type: array + type: object + api.LoginStatus: + properties: + loggedin: + type: boolean + type: object + api.PodListResponse: + properties: + podName: + items: + type: string + type: array + sharedPodName: + items: + type: string + type: array + type: object + api.PodNameRequest: + properties: + podName: + type: string + type: object + api.PodSharingReference: + properties: + podSharingReference: + type: string + type: object + api.PodStatResponse: + properties: + address: + type: string + podName: + type: string + type: object + api.PresentResponse: + properties: + present: + type: boolean + type: object + api.SimpleDocRequest: + properties: + podName: + type: string + tableName: + type: string + type: object + api.UserDeleteRequest: + properties: + password: + type: string + type: object + api.UserLoginResponse: + properties: + address: + type: string 
+ message: + type: string + nameHash: + type: string + publicKey: + type: string + type: object + api.UserSignupResponse: + properties: + address: + type: string + message: + type: string + mnemonic: + type: string + nameHash: + type: string + publicKey: + type: string + type: object + api.documentDB: + properties: + indexes: + items: + $ref: '#/definitions/collection.SIndex' + type: array + tableName: + type: string + type: + type: string + type: object + api.response: + properties: + message: + type: string + type: object + collection.SIndex: + properties: + name: + type: string + type: + type: integer + type: object + collection.TableKeyCount: + properties: + count: + type: integer + table_name: + type: string + type: object + common.PodShareRequest: + properties: + podName: + type: string + sharedPodName: + type: string + type: object + common.RenameRequest: + properties: + newPath: + type: string + oldPath: + type: string + podName: + type: string + type: object + common.UserLoginRequest: + properties: + password: + type: string + userName: + type: string + type: object + common.UserSignupRequest: + properties: + mnemonic: + type: string + password: + type: string + userName: + type: string + type: object + dir.Entry: + properties: + accessTime: + type: string + blockSize: + type: string + contentType: + type: string + creationTime: + type: string + modificationTime: + type: string + name: + type: string + size: + type: string + type: object + dir.Stats: + properties: + accessTime: + type: string + creationTime: + type: string + dirName: + type: string + dirPath: + type: string + modificationTime: + type: string + noOfDirectories: + type: string + noOfFiles: + type: string + podName: + type: string + type: object + file.Blocks: + properties: + compressedSize: + type: string + reference: + type: string + size: + type: string + type: object + file.Entry: + properties: + accessTime: + type: string + blockSize: + type: string + contentType: + type: string + creationTime: + type: string + modificationTime: + type: string + name: + type: string + size: + type: string + type: object + file.Stats: + properties: + accessTime: + type: string + blockSize: + type: string + blocks: + items: + $ref: '#/definitions/file.Blocks' + type: array + compression: + type: string + contentType: + type: string + creationTime: + type: string + fileName: + type: string + filePath: + type: string + fileSize: + type: string + modificationTime: + type: string + podName: + type: string + type: object + pod.ShareInfo: + properties: + password: + type: string + podAddress: + type: string + podName: + type: string + userAddress: + type: string + type: object + user.ReceiveFileInfo: + properties: + blockSize: + type: string + compression: + type: string + contentType: + type: string + destAddress: + type: string + name: + type: string + numberOfBlocks: + type: string + sharedTime: + type: string + size: + type: string + sourceAddress: + type: string + type: object + user.Stat: + properties: + address: + type: string + userName: + type: string + type: object +host: http://localhost:9090 +info: + contact: {} + description: A list of the currently provided Interfaces to interact with FairOS + decentralised file system(dfs), implementing user, pod, file system, key value + store and document store + license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + title: FairOS-dfs server + version: v0.9.0-rc1 +paths: + /v1/dir/ls: + get: + description: DirectoryLsHandler is the api handler for listing 
the contents + of a directory. + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: dir path + in: query + name: dirPath + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.ListFileResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: List directory + tags: + - dir + /v1/dir/mkdir: + post: + consumes: + - application/json + description: DirectoryMkdirHandler is the api handler to create a new directory. + parameters: + - description: pod name and dir path + in: body + name: dir_request + required: true + schema: + $ref: '#/definitions/api.DirRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "201": + description: Created + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Create directory + tags: + - dir + /v1/dir/present: + get: + description: DirectoryPresentHandler is the api handler which says if a directory + is present or not + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: dir path + in: query + name: dirPath + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.DirPresentResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Is directory present + tags: + - dir + /v1/dir/rename: + post: + consumes: + - application/json + description: DirectoryRenameHandler is the api handler to rename a directory. + parameters: + - description: old name and new path + in: body + name: dir_request + required: true + schema: + $ref: '#/definitions/common.RenameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Rename directory + tags: + - dir + /v1/dir/rmdir: + delete: + consumes: + - application/json + description: DirectoryRmdirHandler is the api handler to remove a directory. 
+ parameters: + - description: pod name and dir path + in: body + name: dir_request + required: true + schema: + $ref: '#/definitions/api.DirRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Remove directory + tags: + - dir + /v1/dir/stat: + get: + description: DirectoryStatHandler is the api handler which gives the information + about a directory + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: dir path + in: query + name: dirPath + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/dir.Stats' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Directory stat + tags: + - dir + /v1/doc/count: + post: + consumes: + - application/json + description: DocCountHandler is the api handler to count the number of documents + in a given document database + parameters: + - description: doc table info + in: body + name: doc_request + required: true + schema: + $ref: '#/definitions/api.DocCountRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/collection.TableKeyCount' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Count number of document in a table + tags: + - doc + /v1/doc/delete: + delete: + consumes: + - application/json + description: DocDeleteHandler is the api handler to delete the given document + database + parameters: + - description: doc table info + in: body + name: doc_request + required: true + schema: + $ref: '#/definitions/api.SimpleDocRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Delete a doc table + tags: + - doc + /v1/doc/entry/delete: + delete: + consumes: + - application/json + description: DocEntryDelHandler is the api handler to delete a document from + a document datastore + parameters: + - in: query + name: id + type: string + - in: query + name: podName + type: string + - in: query + name: tableName + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + 
summary: Delete a document from a document datastore + tags: + - doc + /v1/doc/entry/get: + get: + consumes: + - application/json + description: DocEntryGetHandler is the api handler to get a document from a + document datastore + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: table name + in: query + name: tableName + required: true + type: string + - description: id to search for + in: query + name: id + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.DocGetResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Get a document from a document datastore + tags: + - doc + /v1/doc/entry/put: + post: + consumes: + - application/json + description: DocEntryPutHandler is the api handler to add a document into a document + datastore + parameters: + - in: query + name: doc + type: string + - in: query + name: podName + type: string + - in: query + name: tableName + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Add a record in document datastore + tags: + - doc + /v1/doc/find: + get: + consumes: + - application/json + description: DocFindHandler is the api handler to select rows from a given document + datastore + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: table name + in: query + name: tableName + required: true + type: string + - description: expression to search for + in: query + name: expr + required: true + type: string + - description: number of documents + in: query + name: limit + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.DocFindResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Get rows from a given doc datastore + tags: + - doc + /v1/doc/indexjson: + post: + consumes: + - application/json + description: DocIndexJsonHandler is the api handler to index a json file that + is present in a pod, into the given document database + parameters: + - description: index request + in: body + name: index_request + required: true + schema: + $ref: '#/definitions/api.DocIndexRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Index a json file that is present in a pod, into the given document + database + tags: + -
doc + /v1/doc/loadjson: + post: + consumes: + - multipart/form-data + description: DocLoadJsonHandler is the api handler that indexes a json file + that is present in the local file system + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: table name + in: query + name: tableName + required: true + type: string + - description: json to index + in: formData + name: json + required: true + type: file + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Load json file from local file system + tags: + - doc + /v1/doc/ls: + get: + consumes: + - application/json + description: DocListHandler is the api handler which lists all the document + databases in a pod + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.DocumentDBs' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: List all doc tables + tags: + - doc + /v1/doc/new: + post: + consumes: + - application/json + description: DocCreateHandler is the api handler to create a new document database + parameters: + - description: doc table info + in: body + name: doc_request + required: true + schema: + $ref: '#/definitions/api.DocRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "201": + description: Created + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Create a doc table + tags: + - doc + /v1/doc/open: + post: + consumes: + - application/json + description: DocOpenHandler is the api handler to open a document database + parameters: + - description: doc table info + in: body + name: doc_request + required: true + schema: + $ref: '#/definitions/api.DocRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.DocumentDBs' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Open a doc table + tags: + - doc + /v1/file/delete: + delete: + consumes: + - application/json + description: FileDeleteHandler is the api handler to delete a file from a given + pod + parameters: + - description: pod name and file path + in: body + name: file_delete_request + required: true + schema: + $ref: '#/definitions/api.FileDeleteRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: +
$ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "404": + description: Not Found + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Delete a file + tags: + - file + /v1/file/download: + get: + consumes: + - application/json + description: FileDownloadHandlerGet is the api handler to download a file from + a given pod + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: file path + in: query + name: filePath + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - '*/*' + responses: + "200": + description: OK + schema: + items: + type: integer + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Download a file + tags: + - file + post: + consumes: + - multipart/form-data + description: FileDownloadHandlerPost is the api handler to download a file from + a given pod + parameters: + - description: pod name + in: formData + name: podName + required: true + type: string + - description: file path + in: formData + name: filePath + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - '*/*' + responses: + "200": + description: OK + schema: + items: + type: integer + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Download a file + tags: + - file + /v1/file/receive: + get: + consumes: + - application/json + description: FileReceiveHandler is the api handler to receive a file in a given + pod + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: sharing reference + in: query + name: sharingRef + required: true + type: string + - description: file location + in: query + name: dirPath + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.FileSharingReference' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Receive a file + tags: + - file + /v1/file/receiveinfo: + get: + consumes: + - application/json + description: FileReceiveInfoHandler is the api handler to receive a file info + parameters: + - description: sharing reference + in: query + name: sharingRef + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/user.ReceiveFileInfo' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Receive a file info + tags: + - file + /v1/file/rename: + post: + consumes: + - application/json + description: FileRenameHandler is the api 
handler to rename a file + parameters: + - description: old name & new name + in: body + name: rename_request + required: true + schema: + $ref: '#/definitions/common.RenameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "404": + description: Not Found + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Rename a file + tags: + - file + /v1/file/share: + post: + consumes: + - application/json + description: FileShareHandler is the api handler to share a file from a given + pod + parameters: + - description: file share request params + in: body + name: file_share_request + required: true + schema: + $ref: '#/definitions/api.FileShareRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.FileSharingReference' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Share a file + tags: + - file + /v1/file/stat: + get: + consumes: + - application/json + description: FileStatHandler is the api handler to get the information of a + file + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: file path + in: query + name: filePath + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/file.Stats' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Info of a file + tags: + - file + /v1/file/upload: + post: + consumes: + - multipart/form-data + description: FileUploadHandler is the api handler to upload a file from a local + file system to the dfs + parameters: + - description: pod name + in: formData + name: podName + required: true + type: string + - description: location + in: formData + name: dirPath + required: true + type: string + - description: block size to break the file + example: 4Kb, 1Mb + in: formData + name: blockSize + required: true + type: string + - description: file to upload + in: formData + name: files + required: true + type: file + - description: compression type + example: snappy, gzip + in: header + name: fairOS-dfs-Compression + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Upload a file + tags: + - file + /v1/kv/count: + post: + consumes: + - application/json + description: KVCountHandler is the api handler to count the number of rows in + a key value table + parameters: + - description: kv 
table request + in: body + name: kv_table_request + required: true + schema: + $ref: '#/definitions/api.KVTableRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/collection.TableKeyCount' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Count rows in a key value table + tags: + - kv + /v1/kv/delete: + delete: + consumes: + - application/json + description: KVDeleteHandler is the api handler to delete a key value table + parameters: + - description: kv table request + in: body + name: kv_table_request + required: true + schema: + $ref: '#/definitions/api.KVTableRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Delete a key value table + tags: + - kv + /v1/kv/entry/del: + delete: + consumes: + - application/json + description: KVDelHandler is the api handler to delete a key and value from + the kv table + parameters: + - description: delete request + in: body + name: delete_request + required: true + schema: + $ref: '#/definitions/api.KVEntryDeleteRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.KVResponseRaw' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Delete key-value from the kv table + tags: + - kv + /v1/kv/entry/get: + get: + consumes: + - application/json + description: KVGetHandler is the api handler to get a value from the kv table + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: table name + in: query + name: tableName + required: true + type: string + - description: key + in: query + name: key + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.KVResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: get value from the kv table + tags: + - kv + /v1/kv/entry/get-data: + get: + consumes: + - application/json + description: KVGetDataHandler is the api handler to get raw value from the kv + table + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: table name + in: query + name: tableName + required: true + type: string + - description: key + in: query + name: key + required: true + type: string + - description: format of the value + example: byte-string, string + in: query + name: format + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: 
string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.KVResponseRaw' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: get value from the kv table + tags: + - kv + /v1/kv/entry/present: + get: + consumes: + - application/json + description: KVPresentHandler is the api handler to check if a value exists + in the kv table + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: table name + in: query + name: tableName + required: true + type: string + - description: key + in: query + name: key + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Check if a value exists in the kv table + tags: + - kv + /v1/kv/entry/put: + post: + consumes: + - application/json + description: KVPutHandler is the api handler to put a key-value in the kv table + parameters: + - description: kv entry + in: body + name: kv_entry + required: true + schema: + $ref: '#/definitions/api.KVEntryRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: put key and value in the kv table + tags: + - kv + /v1/kv/export: + post: + consumes: + - application/json + description: KVExportHandler is the api handler to export from a particular + key with the given prefix + parameters: + - description: kv export info + in: body + name: export_request + required: true + schema: + $ref: '#/definitions/api.KVExportRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + additionalProperties: true + type: object + type: array + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Export from a particular key with the given prefix + tags: + - kv + /v1/kv/loadcsv: + post: + consumes: + - multipart/form-data + description: KVLoadCSVHandler is the api handler to load a csv file as key and + value in a KV table + parameters: + - description: pod name + in: formData + name: podName + required: true + type: string + - description: table name + in: formData + name: tableName + required: true + type: string + - description: keep in memory + in: formData + name: memory + type: string + - description: file to upload + in: formData + name: csv + required: true + type: file + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: 
Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Upload a csv file in kv table + tags: + - kv + /v1/kv/ls: + get: + consumes: + - application/json + description: KVListHandler is the api handler to list all the key value tables + in a pod + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.Collections' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: List all key value tables + tags: + - kv + /v1/kv/new: + post: + consumes: + - application/json + description: KVCreateHandler is the api handler to create a key value table + parameters: + - description: kv table request + in: body + name: kv_table_request + required: true + schema: + $ref: '#/definitions/api.KVTableRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "201": + description: Created + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Create a key value table + tags: + - kv + /v1/kv/open: + post: + consumes: + - application/json + description: KVOpenHandler is the api handler to open a key value table + parameters: + - description: kv table request + in: body + name: kv_table_request + required: true + schema: + $ref: '#/definitions/api.KVTableRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "201": + description: Created + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Open a key value table + tags: + - kv + /v1/kv/seek: + post: + consumes: + - application/json + description: KVSeekHandler is the api handler to seek to a particular key with + the given prefix + parameters: + - description: kv seek info + in: body + name: export_request + required: true + schema: + $ref: '#/definitions/api.KVExportRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Seek in kv table + tags: + - kv + /v1/kv/seek/next: + post: + consumes: + - application/json + description: KVGetNextHandler is the api handler to get the key and value from + the current seek position + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: table name + in: query + name: tableName + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + 
produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.KVResponse' + "204": + description: No Content + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Get next value from last seek in kv table + tags: + - kv + /v1/pod/close: + post: + consumes: + - application/json + description: PodCloseHandler is the api handler to close an open pod + parameters: + - description: pod name + in: body + name: pod_request + required: true + schema: + $ref: '#/definitions/api.PodNameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Close pod + tags: + - pod + /v1/pod/delete: + delete: + consumes: + - application/json + description: PodDeleteHandler is the api handler to delete a pod + parameters: + - description: pod name and user password + in: body + name: pod_request + required: true + schema: + $ref: '#/definitions/api.PodNameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Delete pod + tags: + - pod + /v1/pod/ls: + get: + consumes: + - application/json + description: PodListHandler is the api handler to list all pods + parameters: + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.PodListResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: List pods + tags: + - pod + /v1/pod/new: + post: + consumes: + - application/json + description: PodCreateHandler is the api handler to create a new pod + parameters: + - description: pod name and user password + in: body + name: pod_request + required: true + schema: + $ref: '#/definitions/api.PodNameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "201": + description: Created + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Create pod + tags: + - pod + /v1/pod/open: + post: + consumes: + - application/json + description: PodOpenHandler is the api handler to open pod + parameters: + - description: pod name and user password + in: body + name: pod_request + required: true + schema: + $ref: '#/definitions/api.PodNameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + 
responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Open pod + tags: + - pod + /v1/pod/open-async: + post: + consumes: + - application/json + description: PodOpenAsyncHandler is the api handler to open pod asynchronously + parameters: + - description: pod name and user password + in: body + name: pod_request + required: true + schema: + $ref: '#/definitions/api.PodNameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Open pod + tags: + - pod + /v1/pod/present: + get: + consumes: + - application/json + description: PodPresentHandler is the api handler to check if a pod is present + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Is pod present + tags: + - pod + /v1/pod/receive: + get: + consumes: + - application/json + description: PodReceiveHandler is the api handler to receive shared pod from + shared reference + parameters: + - description: pod sharing reference + in: query + name: sharingRef + required: true + type: string + - description: pod name to be saved as + in: query + name: sharedPodName + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Receive shared pod + tags: + - pod + /v1/pod/receiveinfo: + get: + consumes: + - application/json + description: PodReceiveInfoHandler is the api handler to receive shared pod + info from shared reference + parameters: + - description: pod sharing reference + in: query + name: sharingRef + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/pod.ShareInfo' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Receive shared pod info + tags: + - pod + /v1/pod/share: + post: + consumes: + - application/json + description: PodShareHandler is the api handler to share a pod to the public + parameters: + - description: pod name and user password + in: body + name: pod_request + required: true + schema: + $ref: '#/definitions/common.PodShareRequest' + - description: cookie 
parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.PodSharingReference' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Share pod + tags: + - pod + /v1/pod/stat: + get: + consumes: + - application/json + description: PodStatHandler is the api handler to get information about a pod + parameters: + - description: pod name + in: query + name: podName + required: true + type: string + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.PodStatResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Stats for pod + tags: + - pod + /v1/pod/sync: + post: + consumes: + - application/json + description: PodSyncHandler is the api handler to sync a pod's content + parameters: + - description: pod name + in: body + name: pod_request + required: true + schema: + $ref: '#/definitions/api.PodNameRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Sync pod + tags: + - pod + /v1/user/delete: + post: + deprecated: true + responses: {} + tags: + - user + /v1/user/export: + post: + deprecated: true + responses: {} + tags: + - user + /v1/user/isloggedin: + get: + consumes: + - application/json + description: Check if the given user is logged-in + parameters: + - description: user name + in: query + name: userName + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.LoginStatus' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + summary: Is user logged-in + tags: + - user + /v1/user/login: + post: + deprecated: true + responses: {} + tags: + - user + /v1/user/logout: + post: + consumes: + - application/json + description: logs out the user + parameters: + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Logout + tags: + - user + /v1/user/present: + get: + deprecated: true + responses: {} + tags: + - user + /v1/user/signup: + post: + deprecated: true + responses: {} + tags: + - user + /v1/user/stat: + get: + consumes: + - application/json + description: shows user stats + parameters: + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + responses: + "200": + description: OK + schema: + $ref: '#/definitions/user.Stat' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + 
schema: + $ref: '#/definitions/api.response' + summary: User stat + tags: + - user + /v2/user/delete: + delete: + description: deletes user info from swarm + parameters: + - description: user delete request + in: body + name: UserDeleteRequest + required: true + schema: + $ref: '#/definitions/api.UserDeleteRequest' + - description: cookie parameter + in: header + name: Cookie + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.response' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Delete user for ENS based authentication + tags: + - user + /v2/user/login: + post: + consumes: + - application/json + description: login user with the new ENS based authentication + parameters: + - description: user name + in: body + name: user_request + required: true + schema: + $ref: '#/definitions/common.UserLoginRequest' + produces: + - application/json + responses: + "200": + description: OK + headers: + Set-Cookie: + description: fairos-dfs session + type: string + schema: + $ref: '#/definitions/api.UserLoginResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "404": + description: Not Found + schema: + $ref: '#/definitions/api.response' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Login User + tags: + - user + /v2/user/present: + get: + description: checks if the new user is present in the new ENS based authentication + parameters: + - description: user name + in: query + name: userName + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/api.PresentResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + summary: Check if user is present + tags: + - user + /v2/user/signup: + post: + consumes: + - application/json + description: registers new user with the new ENS based authentication + parameters: + - description: user name + in: body + name: user_request + required: true + schema: + $ref: '#/definitions/common.UserSignupRequest' + produces: + - application/json + responses: + "201": + description: Created + schema: + $ref: '#/definitions/api.UserSignupResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.response' + "402": + description: Payment Required + schema: + $ref: '#/definitions/api.UserSignupResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.response' + summary: Register New User + tags: + - user +swagger: "2.0"
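For reference, a minimal client sketch against the endpoints documented in this spec. It assumes a dfs server listening on the default `localhost:9090`; the user name and password are hypothetical placeholders. The flow (login via `/v2/user/login`, then passing the returned session cookie back on `/v1/pod/ls`) mirrors the `Cookie` header parameter that most handlers above require.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Log in via the v2 ENS-based authentication endpoint documented above.
	// "alice" and "supersecret" are placeholder credentials, not real defaults.
	body, _ := json.Marshal(map[string]string{
		"userName": "alice",
		"password": "supersecret",
	})
	resp, err := http.Post("http://localhost:9090/v2/user/login", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Per the spec, the fairos-dfs session is returned in the Set-Cookie header;
	// subsequent calls pass it back via the Cookie header.
	cookies := resp.Cookies()

	req, _ := http.NewRequest(http.MethodGet, "http://localhost:9090/v1/pod/ls", nil)
	for _, c := range cookies {
		req.AddCookie(c)
	}
	listResp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer listResp.Body.Close()
	fmt.Println("pod list status:", listResp.Status)
}
```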