diff --git a/.github/workflows/binary.yml b/.github/workflows/binary.yml
new file mode 100644
index 0000000..0977d66
--- /dev/null
+++ b/.github/workflows/binary.yml
@@ -0,0 +1,27 @@
+name: dev binary
+
+on:
+ push:
+ branches:
+ - master
+
+jobs:
+ publish:
+ runs-on: ubuntu-latest
+ steps:
+ - name: checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: "Setup go"
+ uses: actions/setup-go@v4
+
+ - name: "Build binary"
+ run: |
+ make
+
+ - uses: actions/upload-artifact@v3
+ with:
+ name: vmihub-ubuntu
+ path: bin/vmihub
\ No newline at end of file
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000..9c6afa4
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,31 @@
+name: golangci-lint
+on:
+ push:
+ tags:
+ - '!v*'
+ branches:
+ - '*'
+ pull_request:
+
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v6
+ env:
+ ACTIONS_ALLOW_UNSECURE_COMMANDS: 'true'
+ with:
+ # The golangci-lint version to use; `latest` resolves to the newest release.
+ version: latest
+ # Optional: show only new issues if it's a pull request. The default value is `false`.
+ only-new-issues: true
diff --git a/.github/workflows/goreleaser.yml b/.github/workflows/goreleaser.yml
new file mode 100644
index 0000000..f9e0790
--- /dev/null
+++ b/.github/workflows/goreleaser.yml
@@ -0,0 +1,32 @@
+name: goreleaser
+
+on:
+ push:
+ tags:
+ - v*
+
+jobs:
+ goreleaser:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up environment variables
+ run: |
+ echo "VERSION=$(git describe --tags $(git rev-list --tags --max-count=1))" >> $GITHUB_ENV
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version-file: 'go.mod'
+
+ - name: Run GoReleaser
+ uses: goreleaser/goreleaser-action@v5
+ with:
+ version: latest
+ args: release --clean
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..d4a656f
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,20 @@
+name: test
+
+on:
+ push:
+ tags:
+ - '!v*'
+ branches:
+ - '*'
+ pull_request:
+
+jobs:
+ unittests:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v4
+ with:
+ go-version-file: 'go.mod'
+ - name: unit tests
+ run: make test
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c29b5d5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+/bin
+cscope.*
+tmp/
+bk/
+vendor/
+dist/
+.idea/
+.DS_Store
+.air.toml
+config-dev.toml
+config-prod.toml
+/.vscode
+config/vmihub.toml
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..e1489b9
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,102 @@
+run:
+ timeout: 5m
+ tests: false
+ modules-download-mode: readonly
+
+issues:
+ exclude-dirs:
+ - vendor
+ - tools
+ - 3rdmocks
+ - e2e
+ - webconsole
+ - fs
+ - mocks
+linters-settings:
+ nakedret:
+ max-func-lines: 59
+ misspell:
+ locale: US
+ ignore-words:
+ - hicloud
+ gofmt:
+ simplify: false
+ rewrite-rules:
+ - pattern: 'interface{}'
+ replacement: 'any'
+ - pattern: 'a[b:len(a)]'
+ replacement: 'a[b:]'
+ prealloc:
+ simple: false
+ range-loops: true
+ for-loops: true
+ errcheck:
+ check-type-assertions: true
+ gocritic:
+ disabled-checks:
+ - captLocal
+ nilnil:
+ checked-types:
+ - ptr
+ - func
+ - iface
+ - map
+ - chan
+ asasalint:
+ exclude:
+ - Append
+ - \.Wrapf
+ use-builtin-exclusions: false
+ ignore-test: true
+ usestdlibvars:
+ http-method: true
+ http-status-code: true
+ time-weekday: true
+ time-month: true
+ time-layout: true
+ crypto-hash: true
+ default-rpc-path: true
+ os-dev-null: true
+ sql-isolation-level: true
+ tls-signature-scheme: true
+ constant-kind: true
+ syslog-priority: true
+
+linters:
+ disable-all: true
+ enable:
+ - usestdlibvars
+ - asasalint
+ - bodyclose
+ - nolintlint
+ - nosprintfhostport
+ - prealloc
+ - durationcheck
+ - errname
+ - goconst
+ - gocyclo
+ - gocognit
+ - interfacebloat
+ - makezero
+ - nilerr
+ - nilnil
+ - gofmt
+ - goimports
+ - revive
+ - goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nakedret
+ - exportloopref
+ - staticcheck
+ - typecheck
+ - unconvert
+ - unparam
+ - unused
+ - asciicheck
+ - nestif
+ - errcheck
+ - gocritic
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..8241853
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,24 @@
+FROM golang:bookworm as gobuilder
+
+WORKDIR /app
+COPY . .
+
+ENV GOPROXY https://goproxy.cn,direct
+RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list.d/debian.sources
+RUN apt-get update
+RUN apt-get install -y libcephfs-dev librbd-dev librados-dev build-essential
+RUN make deps CN=1
+RUN make build CN=1
+RUN ./bin/vmihub --version
+
+FROM debian:bookworm
+
+RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list.d/debian.sources && \
+ apt-get update && \
+ apt-get install -y libcephfs-dev librbd-dev librados-dev genisoimage qemu-utils
+WORKDIR /app
+
+COPY --from=gobuilder /app/bin/vmihub .
+
+ENTRYPOINT [ "vmihub" ]
+CMD ["--config", "config.toml", "server"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d03742c
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,28 @@
+The MIT License (MIT)
+
+Original Work
+Copyright (c) 2016 Matthias Kadenbach
+https://github.com/mattes/migrate
+
+Modified Work
+Copyright (c) 2018 Dale Hui
+https://github.com/golang-migrate/migrate
+
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..adc4c20
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,93 @@
+ifeq ($(CN), 1)
+ENV := GOPROXY=https://goproxy.cn,direct
+endif
+
+SOURCE_FILES = $(shell go list -f '{{range .GoFiles}}{{$$.Dir}}/{{.}}\
+{{end}}' ./...)
+
+NS := github.com/projecteru2/vmihub
+REVISION := $(shell git rev-parse HEAD || echo unknown)
+BUILTAT := $(shell date +%Y-%m-%dT%H:%M:%S)
+VERSION := $(shell git describe --tags $(shell git rev-list --tags --max-count=1))
+GO_LDFLAGS ?= -X $(NS)/internal/version.REVISION=$(REVISION) \
+ -X $(NS)/internal/version.BUILTAT=$(BUILTAT) \
+ -X $(NS)/internal/version.VERSION=$(VERSION)
+ifneq ($(KEEP_SYMBOL), 1)
+ GO_LDFLAGS += -s
+endif
+
+BUILD := go build -race
+TEST := go test -count=1 -race -cover -gcflags=all=-l
+
+PKGS := $$(go list ./... | grep -v -P '$(NS)/3rd|vendor/|mocks|e2e|fs|webconsole|ovn')
+
+.PHONY: all test e2e
+
+default: build
+
+build: bin/vmihub
+
+bin/vmihub: $(SOURCE_FILES)
+ $(BUILD) -ldflags '$(GO_LDFLAGS)' -o "$@" ./cmd/vmihub
+
+lint:
+ golangci-lint run
+
+format: vet
+ gofmt -s -w $$(find . -iname '*.go' | grep -v -P '\./3rd|\./vendor/|mocks')
+
+vet:
+ go vet $(PKGS)
+
+deps:
+ $(ENV) go mod tidy
+ $(ENV) go mod vendor
+
+mock: deps
+ mockery --dir internal/storage --output internal/storage/mocks --name Storage
+ mockery --dir client/image --output client/image/mocks --all
+
+clean:
+ rm -fr bin/*
+
+setup: setup-lint
+ $(ENV) go install github.com/vektra/mockery/v2@latest
+ $(ENV) go install github.com/swaggo/swag/cmd/swag@latest
+ $(ENV) go install -tags 'mysql' github.com/golang-migrate/migrate/v4/cmd/migrate@latest
+
+setup-lint:
+ $(ENV) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.1
+
+swag:
+ swag init -g cmd/vmihub/main.go -o cmd/vmihub/docs
+
+test:
+ifdef RUN
+ $(TEST) -v -run='${RUN}' $(PKGS)
+else
+ $(TEST) $(PKGS)
+endif
+
+e2e:
+ifdef DIR
+ cp -f e2e/config.toml e2e/${DIR}/config.toml
+ cd e2e/${DIR} && ginkgo -r -p -- --config=`pwd`/config.toml
+else
+ cd e2e && ginkgo -r -p -- --config=`pwd`/config.toml
+endif
+
+db-migrate-setup:
+ curl -L https://github.com/golang-migrate/migrate/releases/download/v4.17.1/migrate.linux-amd64.tar.gz | tar xvz
+ mv migrate /usr/local/bin/
+
+db-migrate-create:
+ migrate create -ext sql -dir internal/models/migration ${table}
+
+db-migrate-up:
+ migrate -database '${uri}' -path internal/models/migration up
+
+db-migrate-down:
+ migrate -database '${uri}' -path internal/models/migration down ${N}
+
+db-migrate-setver:
+ migrate -database '${uri}' -path internal/models/migration force ${ver}
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..fe2d229
--- /dev/null
+++ b/README.md
@@ -0,0 +1,38 @@
+vmihub
+====
+![](https://github.com/projecteru2/vmihub/workflows/test/badge.svg)
+![](https://github.com/projecteru2/vmihub/workflows/golangci-lint/badge.svg)
+
+virtual machine image hub for ERU
+
+# Swagger
+Generate the Swagger docs:
+```shell
+make swag
+```
+
+### Install swag on macOS
+https://github.com/swaggo/swag/blob/v1.16.1/README_zh-CN.md
+
+Then generate the docs by running swag directly:
+```shell
+swag init -g cmd/vmihub/main.go -o cmd/vmihub/docs
+```
+# Prepare Redis
+A standard Redis setup is sufficient.
+
+# Prepare the database
+Whether you are initializing the database for the first time or changing its schema later, please refer to [this guide](internal/models/migration/README.md).
+# Prepare S3
+S3 is used to store the image files.
+
+# Build
+
+```shell
+make
+```
+
+# Run the server
+```shell
+bin/vmihub --config=config/config.example.toml server
+```
diff --git a/assets/assets.go b/assets/assets.go
new file mode 100644
index 0000000..0502bda
--- /dev/null
+++ b/assets/assets.go
@@ -0,0 +1,6 @@
+package assets
+
+import "embed"
+
+//go:embed *
+var Assets embed.FS
diff --git a/assets/i18n/localize/en.yaml b/assets/i18n/localize/en.yaml
new file mode 100644
index 0000000..507b595
--- /dev/null
+++ b/assets/i18n/localize/en.yaml
@@ -0,0 +1,2 @@
+healthy: "Healthy"
+unhealthy: "Unhealthy"
\ No newline at end of file
diff --git a/assets/i18n/localize/zh.yaml b/assets/i18n/localize/zh.yaml
new file mode 100644
index 0000000..81a52ea
--- /dev/null
+++ b/assets/i18n/localize/zh.yaml
@@ -0,0 +1,2 @@
+healthy: 健康
+unhealthy: 不健康
\ No newline at end of file
diff --git a/assets/templates/upload.html b/assets/templates/upload.html
new file mode 100644
index 0000000..94d91a5
--- /dev/null
+++ b/assets/templates/upload.html
@@ -0,0 +1,179 @@
+
+
+
+
+
+
+ Document
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/client/auth/auth.go b/client/auth/auth.go
new file mode 100644
index 0000000..6c885ed
--- /dev/null
+++ b/client/auth/auth.go
@@ -0,0 +1,135 @@
+package auth
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ svctypes "github.com/projecteru2/vmihub/pkg/types"
+)
+
+func Register(ctx context.Context, serverURL string, body *svctypes.RegisterRequest) error {
+ reqURL := fmt.Sprintf("%s/api/v1/user/register", serverURL)
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return err
+ }
+ query := u.Query()
+ u.RawQuery = query.Encode()
+
+ bodyBytes, err := json.Marshal(body)
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewReader(bodyBytes))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ bs, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("failed to login, status code %d, body: %s", resp.StatusCode, string(bs))
+ }
+
+ return nil
+}
+
+func GetToken(ctx context.Context, serverURL, username, password string) (string, string, error) {
+ reqURL := fmt.Sprintf("%s/api/v1/user/token", serverURL)
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return "", "", err
+ }
+ query := u.Query()
+ u.RawQuery = query.Encode()
+
+ body := svctypes.LoginRequest{
+ Username: username,
+ Password: password,
+ }
+ bodyBytes, err := json.Marshal(body)
+ if err != nil {
+ return "", "", err
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewReader(bodyBytes))
+ if err != nil {
+ return "", "", err
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := http.DefaultClient.Do(req) //nolint:bodyclose
+ if err != nil {
+ return "", "", err
+ }
+ defer func(body io.ReadCloser) {
+ _ = body.Close()
+ }(resp.Body)
+
+ if resp.StatusCode != http.StatusOK {
+ return "", "", fmt.Errorf("登录失败,HTTP状态码续:为 %d", resp.StatusCode)
+ }
+
+ var tokenResp svctypes.TokenResponse
+ if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
+ return "", "", err
+ }
+
+ return tokenResp.AccessToken, tokenResp.RefreshToken, nil
+}
+
+func RefreshToken(ctx context.Context, serverURL, accessToken, refreshToken string) (string, string, error) {
+ reqURL := fmt.Sprintf("%s/api/v1/user/refreshToken", serverURL)
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return "", "", err
+ }
+ query := u.Query()
+ u.RawQuery = query.Encode()
+
+ body := svctypes.RefreshRequest{
+ AccessToken: accessToken,
+ RefreshToken: refreshToken,
+ }
+ bodyBytes, err := json.Marshal(body)
+ if err != nil {
+ return "", "", err
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewReader(bodyBytes))
+ if err != nil {
+ return "", "", err
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := http.DefaultClient.Do(req) //nolint:bodyclose
+ if err != nil {
+ return "", "", err
+ }
+ defer func(body io.ReadCloser) {
+ _ = body.Close()
+ }(resp.Body)
+
+ if resp.StatusCode != http.StatusOK {
+ return "", "", fmt.Errorf("登录失败,HTTP状态码续:为 %d", resp.StatusCode)
+ }
+
+ var tokenResp svctypes.TokenResponse
+ if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
+ return "", "", err
+ }
+
+ return tokenResp.AccessToken, tokenResp.RefreshToken, nil
+}
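A minimal usage sketch for the auth helpers added above. The server URL and credentials are placeholders, not values from this change; only `GetToken` and `RefreshToken` from `client/auth` are exercised.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/projecteru2/vmihub/client/auth"
)

func main() {
	ctx := context.Background()
	// serverURL and the credentials below are placeholders for illustration only.
	serverURL := "http://127.0.0.1:5000"

	access, refresh, err := auth.GetToken(ctx, serverURL, "testuser", "testpassword")
	if err != nil {
		log.Fatalf("login failed: %v", err)
	}
	fmt.Println("access token:", access)

	// When the access token expires, exchange the pair for fresh tokens.
	access, refresh, err = auth.RefreshToken(ctx, serverURL, access, refresh)
	if err != nil {
		log.Fatalf("refresh failed: %v", err)
	}
	fmt.Println("refreshed tokens:", access, refresh)
}
```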
diff --git a/client/auth/auth_test.go b/client/auth/auth_test.go
new file mode 100644
index 0000000..a90ad36
--- /dev/null
+++ b/client/auth/auth_test.go
@@ -0,0 +1,56 @@
+package auth
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ svctypes "github.com/projecteru2/vmihub/pkg/types"
+)
+
+func TestGetToken(t *testing.T) {
+ // create test server
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // simulate handling of the login request
+ var loginReq svctypes.LoginRequest
+ if err := json.NewDecoder(r.Body).Decode(&loginReq); err != nil {
+ t.Errorf("Failed to decode login request: %v", err)
+ }
+
+ // return a token based on the matching username and password
+ if loginReq.Username == "testuser" && loginReq.Password == "testpassword" {
+ resp := svctypes.TokenResponse{
+ AccessToken: "testtoken",
+ }
+ jsonResp, err := json.Marshal(resp)
+ if err != nil {
+ t.Errorf("Failed to marshal login response: %v", err)
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write(jsonResp)
+ } else {
+ w.WriteHeader(http.StatusUnauthorized)
+ }
+ }))
+ defer server.Close()
+
+ // set test params
+ serverURL := server.URL
+ username := "testuser"
+ password := "testpassword"
+
+ // test GetToken
+ token, _, err := GetToken(context.Background(), serverURL, username, password)
+ if err != nil {
+ t.Errorf("GetToken returned an error: %v", err)
+ }
+
+ expectedToken := "testtoken"
+ if token != expectedToken {
+ t.Errorf("Token is incorrect. Expected: %s, Got: %s", expectedToken, token)
+ }
+}
diff --git a/client/base/base.go b/client/base/base.go
new file mode 100644
index 0000000..bf6b7d0
--- /dev/null
+++ b/client/base/base.go
@@ -0,0 +1,141 @@
+package base
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "net/url"
+
+ "github.com/cockroachdb/errors"
+ "github.com/projecteru2/vmihub/client/terrors"
+ "github.com/projecteru2/vmihub/client/types"
+)
+
+type APIImpl struct {
+ ServerURL string
+ Cred *types.Credential
+}
+
+func NewAPI(addr string, cred *types.Credential) *APIImpl {
+ impl := &APIImpl{
+ ServerURL: addr,
+ Cred: cred,
+ }
+ return impl
+}
+
+func (i *APIImpl) AddAuth(req *http.Request) error {
+ var val string
+ if i.Cred.Username != "" && i.Cred.Password != "" {
+ val = fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", i.Cred.Username, i.Cred.Password))))
+ } else if i.Cred.Token != "" {
+ val = fmt.Sprintf("Bearer %s", i.Cred.Token)
+ }
+ req.Header.Set("Authorization", val)
+ return nil
+}
+
+func (i *APIImpl) AddAuthToHeader(req *http.Header) error {
+ var val string
+ if i.Cred.Username != "" && i.Cred.Password != "" {
+ val = fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", i.Cred.Username, i.Cred.Password))))
+ } else if i.Cred.Token != "" {
+ val = fmt.Sprintf("Bearer %s", i.Cred.Token)
+ }
+ req.Set("Authorization", val)
+ return nil
+}
+
+func (i *APIImpl) HTTPRequest(ctx context.Context, reqURL, method string, urlQueryValues map[string]string, bodyData any) (resRaw map[string]any, err error) {
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return
+ }
+
+ query := u.Query()
+ if len(urlQueryValues) > 0 {
+ for key, value := range urlQueryValues {
+ query.Add(key, value)
+ }
+ }
+ u.RawQuery = query.Encode()
+
+ var bodyBytes []byte
+ if bodyData != nil {
+ bodyBytes, err = json.Marshal(bodyData)
+ if err != nil {
+ return nil, err
+ }
+ }
+ req, err := http.NewRequestWithContext(ctx, method, u.String(), bytes.NewReader(bodyBytes))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ _ = i.AddAuth(req)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ data, err := GetCommonRawResponse(resp)
+ return data, err
+}
+
+func (i *APIImpl) HTTPPut(ctx context.Context, reqURL string, urlQueryValues map[string]string, bodyData any) (resRaw map[string]any, err error) {
+ return i.HTTPRequest(ctx, reqURL, http.MethodPut, urlQueryValues, bodyData)
+}
+
+func (i *APIImpl) HTTPPost(ctx context.Context, reqURL string, urlQueryValues map[string]string, bodyData any) (resRaw map[string]any, err error) {
+ return i.HTTPRequest(ctx, reqURL, http.MethodPost, urlQueryValues, bodyData)
+}
+
+func (i *APIImpl) HTTPGet(ctx context.Context, reqURL string, urlQueryValues map[string]string) (resRaw map[string]any, err error) {
+ return i.HTTPRequest(ctx, reqURL, http.MethodGet, urlQueryValues, nil)
+}
+
+func (i *APIImpl) HTTPDelete(ctx context.Context, reqURL string, urlQueryValues map[string]string) (resRaw map[string]any, err error) {
+ return i.HTTPRequest(ctx, reqURL, http.MethodDelete, urlQueryValues, nil)
+}
+
+func GetCommonRawResponse(resp *http.Response) (resRaw map[string]any, err error) {
+ bs, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ resRaw = map[string]any{}
+ // decode the body first so that error responses can surface their "error" field
+ decodeErr := json.Unmarshal(bs, &resRaw)
+ if resp.StatusCode != http.StatusOK {
+ err = errors.Wrapf(terrors.ErrHTTPError, "status: %d, error: %v", resp.StatusCode, resRaw["error"])
+ return
+ }
+ if decodeErr != nil {
+ err = errors.Wrapf(decodeErr, "failed to decode response: %s", string(bs))
+ return
+ }
+ return
+}
+
+func GetCommonResponseData(resRaw map[string]any) (data []byte, err error) {
+ val, ok := resRaw["data"]
+ if !ok {
+ return nil, nil
+ }
+ data, err = json.Marshal(val)
+ return
+}
+
+func GetCommonPageListResponse(resRaw map[string]any) (data []byte, total int64, err error) {
+ val, ok := resRaw["data"]
+ if !ok {
+ return nil, 0, nil
+ }
+ // the JSON number arrives as float64; a missing or malformed total simply yields 0
+ if t, ok := resRaw["total"].(float64); ok {
+ total = int64(math.Round(t))
+ }
+ data, err = json.Marshal(val)
+ return
+}
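A short sketch, using placeholder values for the server URL, credentials, and query, of how a caller might drive the generic helpers in `client/base`: issue a GET through `HTTPGet` and unwrap the `data` field of the common response envelope with `GetCommonResponseData`.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/projecteru2/vmihub/client/base"
	"github.com/projecteru2/vmihub/client/types"
)

func main() {
	ctx := context.Background()
	// The server URL, credentials and query values are illustrative only.
	cred := &types.Credential{Username: "testuser", Password: "testpassword"}
	api := base.NewAPI("http://127.0.0.1:5000", cred)

	raw, err := api.HTTPGet(ctx, api.ServerURL+"/api/v1/images", map[string]string{"page": "1", "pageSize": "10"})
	if err != nil {
		log.Fatal(err)
	}
	// Pull the "data" field out of the common response envelope as raw JSON.
	data, err := base.GetCommonResponseData(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("data: %s\n", data)
}
```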
diff --git a/client/image/chunk.go b/client/image/chunk.go
new file mode 100644
index 0000000..afbdcdc
--- /dev/null
+++ b/client/image/chunk.go
@@ -0,0 +1,246 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/cockroachdb/errors"
+ "github.com/dustin/go-humanize"
+ "github.com/projecteru2/vmihub/client/terrors"
+ "github.com/projecteru2/vmihub/client/types"
+ "github.com/projecteru2/vmihub/client/util"
+ svctypes "github.com/projecteru2/vmihub/pkg/types"
+)
+
+func (i *APIImpl) StartUploadImageChunk(ctx context.Context, chunk *types.ChunkSlice, force bool) (err error) {
+ metadata, err := chunk.LoadLocalMetadata()
+ if err != nil {
+ return err
+ }
+ reqURL := fmt.Sprintf("%s/api/v1/image/%s/%s/startChunkUpload",
+ i.ServerURL, chunk.Username, chunk.Name)
+
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return err
+ }
+ body := &svctypes.ImageCreateRequest{
+ Username: chunk.Username,
+ Name: chunk.Name,
+ Tag: chunk.Tag,
+ Size: chunk.Size,
+ Private: chunk.Private,
+ Digest: metadata.Digest,
+ Format: chunk.Format,
+ OS: chunk.OS,
+ Description: chunk.Description,
+ }
+ bodyBytes, _ := json.Marshal(body)
+ nChunks := math.Ceil(float64(chunk.Size) / float64(chunk.ChunkSize))
+ query := u.Query()
+ query.Add("force", strconv.FormatBool(force))
+ query.Add("chunkSize", strconv.FormatInt(chunk.ChunkSize, 10))
+ query.Add("nChunks", strconv.FormatInt(int64(nChunks), 10))
+
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewReader(bodyBytes))
+ if err != nil {
+ return err
+ }
+ err = i.AddAuth(req)
+ if err != nil {
+ return err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ data, err := util.GetRespData(resp)
+ if err != nil {
+ return err
+ }
+ var obj map[string]any
+ if err := json.Unmarshal(data, &obj); err != nil {
+ return err
+ }
+ uploadIDRaw := obj["uploadID"]
+ chunk.UploadID, _ = uploadIDRaw.(string)
+ return err
+}
+
+// MergeChunk asks the server to merge the uploaded chunks after a chunked upload completes
+func (i *APIImpl) MergeChunk(ctx context.Context, uploadID string) error {
+ reqURL := fmt.Sprintf("%s/api/v1/image/chunk/merge", i.ServerURL)
+
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return err
+ }
+ query := u.Query()
+ query.Add("uploadID", uploadID)
+
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), nil)
+ if err != nil {
+ return err
+ }
+ err = i.AddAuth(req)
+ if err != nil {
+ return err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ _, err = util.GetRespData(resp)
+ return err
+}
+
+func (i *APIImpl) DownloadImageChunk(ctx context.Context, chunk *types.ChunkSlice, cIdx int64) error {
+ reqURL := fmt.Sprintf("%s/api/v1/image/%s/%s/chunk/%d/download",
+ i.ServerURL, chunk.Username, chunk.Name, cIdx)
+
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return err
+ }
+ query := u.Query()
+ query.Add("tag", chunk.Tag)
+ query.Add("chunkSize", humanize.Bytes(uint64(chunk.ChunkSize)))
+
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+ if err != nil {
+ return err
+ }
+ err = i.AddAuth(req)
+ if err != nil {
+ return err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return errors.Wrapf(terrors.ErrNetworkError, "status: %d", resp.StatusCode)
+ }
+
+ chunkSliceFile := chunk.SliceFileIndexPath(int(cIdx))
+ if err := util.EnsureDir(filepath.Dir(chunkSliceFile)); err != nil {
+ return err
+ }
+
+ out, err := os.OpenFile(chunkSliceFile, os.O_WRONLY|os.O_CREATE, 0766)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create %s", chunkSliceFile)
+ }
+ defer out.Close()
+
+ // write the downloaded content to the local file
+ _, err = io.Copy(out, resp.Body)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (i *APIImpl) UploadImageChunk(ctx context.Context, chunk *types.ChunkSlice, cIdx int64) error {
+ reqURL := fmt.Sprintf("%s/api/v1/image/chunk/%d/upload",
+ i.ServerURL, cIdx)
+
+ filePath := chunk.Filepath()
+ fp, err := os.Open(filePath)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open %s", filePath)
+ }
+ defer fp.Close()
+ // get slice part
+ offset := cIdx * chunk.ChunkSize
+ _, err = fp.Seek(offset, 0)
+ if err != nil {
+ return errors.Wrapf(err, "failed to seek to %d", offset)
+ }
+ reader := io.LimitReader(fp, chunk.ChunkSize)
+
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ part, err := writer.CreateFormFile("file", filepath.Base(filePath))
+ if err != nil {
+ return errors.Wrapf(err, "failed to create form field")
+ }
+
+ if _, err := io.Copy(part, reader); err != nil {
+ return errors.Wrapf(err, "failed to copy")
+ }
+ _ = writer.Close()
+
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return err
+ }
+ query := u.Query()
+ query.Add("uploadID", chunk.UploadID)
+
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), body)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", writer.FormDataContentType())
+ err = i.AddAuth(req)
+ if err != nil {
+ return err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ _, err = util.GetRespData(resp)
+ return err
+}
+
+func mergeSliceFile(chunk *types.ChunkSlice, nChunks int) error {
+ chunkSliceFile := chunk.SliceFilePath()
+ dest, err := os.OpenFile(chunk.SliceFilePath(), os.O_WRONLY|os.O_CREATE, 0766)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create %s", chunkSliceFile)
+ }
+ defer dest.Close()
+
+ for cIdx := 0; cIdx < nChunks; cIdx++ {
+ src, err := os.OpenFile(chunk.SliceFileIndexPath(cIdx), os.O_RDONLY, 0766)
+ if err != nil {
+ return err
+ }
+ // close each slice explicitly instead of deferring inside the loop
+ _, err = io.Copy(dest, src)
+ src.Close()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
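For reference, a simplified sequential sketch of the chunked-upload flow these helpers implement: register the upload to obtain an upload ID, send each chunk, then ask the server to merge them. The concurrent, retrying version lives in `uploadWithChunk` in `client/image/image.go`; the package name and helper function below are illustrative only.

```go
package example

import (
	"context"
	"math"

	"github.com/projecteru2/vmihub/client/image"
	"github.com/projecteru2/vmihub/client/types"
)

// pushInChunks is a minimal sequential sketch; the real Push path drives
// UploadImageChunk concurrently through a goroutine pool and retries failures.
func pushInChunks(ctx context.Context, api *image.APIImpl, img *types.Image, chunkSize int64) error {
	// img.Size is assumed to be set already (Push fills it in from the local file).
	ck := &types.ChunkSlice{Image: *img, ChunkSize: chunkSize}

	// register the upload; the server hands back ck.UploadID
	if err := api.StartUploadImageChunk(ctx, ck, false); err != nil {
		return err
	}

	// upload every chunk; the last one may be shorter than chunkSize
	nChunks := int64(math.Ceil(float64(ck.Size) / float64(ck.ChunkSize)))
	for idx := int64(0); idx < nChunks; idx++ {
		if err := api.UploadImageChunk(ctx, ck, idx); err != nil {
			return err
		}
	}

	// ask the server to assemble the uploaded chunks into the final image
	return api.MergeChunk(ctx, ck.UploadID)
}
```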
diff --git a/client/image/image.go b/client/image/image.go
new file mode 100644
index 0000000..3a2398f
--- /dev/null
+++ b/client/image/image.go
@@ -0,0 +1,601 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/fs"
+ "math"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/cockroachdb/errors"
+ "github.com/dustin/go-humanize"
+ "github.com/panjf2000/ants/v2"
+
+ "github.com/projecteru2/vmihub/client/base"
+ "github.com/projecteru2/vmihub/client/terrors"
+ "github.com/projecteru2/vmihub/client/types"
+ "github.com/projecteru2/vmihub/client/util"
+ svctypes "github.com/projecteru2/vmihub/pkg/types"
+ svcutils "github.com/projecteru2/vmihub/pkg/utils"
+)
+
+type API interface {
+ NewImage(imgName string) (img *types.Image, err error)
+ ListImages(ctx context.Context, user string, pageN, pageSize int) ([]*types.Image, int, error)
+ ListLocalImages() ([]*types.Image, error)
+ Push(ctx context.Context, img *types.Image, force bool) error
+ Pull(ctx context.Context, imgName string, policy PullPolicy) (img *types.Image, err error)
+ GetInfo(ctx context.Context, imgFullname string) (info *types.Image, err error)
+ RemoveLocalImage(ctx context.Context, img *types.Image) (err error)
+ RemoveImage(ctx context.Context, img *types.Image) (err error)
+}
+
+type APIImpl struct {
+ base.APIImpl
+ opts *Options
+ baseDir string
+ chunkSize int64
+ threshold int64
+ mdb *types.MetadataDB
+}
+
+func NewAPI(addr string, baseDir string, cred *types.Credential, options ...Option) (*APIImpl, error) {
+ opts := &Options{"100M", "1G"}
+ for _, option := range options {
+ option(opts)
+ }
+ chunkSize, err := humanize.ParseBytes(opts.chunkSize)
+ if err != nil {
+ return nil, err
+ }
+ threshold, err := humanize.ParseBytes(opts.threshold)
+ if err != nil {
+ return nil, err
+ }
+ err = util.EnsureDir(filepath.Join(baseDir, "image"))
+ if err != nil {
+ return nil, errors.Wrapf(terrors.ErrFSError, "failed to create dir: %s", err)
+ }
+ mdb, err := types.NewMetadataDB(baseDir, "images")
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to open db")
+ }
+ img := &APIImpl{
+ APIImpl: *base.NewAPI(addr, cred),
+ opts: opts,
+ baseDir: baseDir,
+ chunkSize: int64(chunkSize),
+ threshold: int64(threshold),
+ mdb: mdb,
+ }
+ return img, nil
+}
+
+func (i *APIImpl) NewImage(imgName string) (img *types.Image, err error) {
+ return i.mdb.NewImage(imgName)
+}
+
+// ListImages get all images list
+func (i *APIImpl) ListImages(ctx context.Context, username string, pageN, pageSize int) (images []*types.Image, total int, err error) {
+ reqURL := fmt.Sprintf("%s/api/v1/images", i.ServerURL)
+
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return
+ }
+ query := u.Query()
+ query.Add("username", username)
+ query.Add("page", strconv.FormatInt(int64(pageN), 10))
+ query.Add("pageSize", strconv.FormatInt(int64(pageSize), 10))
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+ if err != nil {
+ return
+ }
+ _ = i.AddAuth(req)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ bs, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return
+ }
+ resRaw := map[string]any{}
+ err = json.Unmarshal(bs, &resRaw)
+ if err != nil {
+ err = errors.Wrapf(err, "failed to decode response: %s", string(bs))
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ err = errors.Wrapf(terrors.ErrHTTPError, "status: %d, error: %v", resp.StatusCode, resRaw["error"])
+ return
+ }
+
+ if obj, ok := resRaw["data"]; ok {
+ bs, _ = json.Marshal(obj)
+ images = []*types.Image{}
+ err = json.Unmarshal(bs, &images)
+ }
+ if obj, ok := resRaw["total"]; ok {
+ total = int(obj.(float64))
+ }
+
+ return images, total, err
+}
+
+func (i *APIImpl) ListLocalImages() ([]*types.Image, error) {
+ var ans []*types.Image
+ baseDir := filepath.Join(i.baseDir, "image/")
+ err := filepath.WalkDir(baseDir, func(path string, _ fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if !strings.HasSuffix(path, ".img") {
+ return nil
+ }
+ imgName := strings.TrimSuffix(path, ".img")
+ imgName = strings.TrimPrefix(imgName, baseDir)
+ imgName = strings.TrimPrefix(imgName, "/")
+ img, err := i.NewImage(imgName)
+ if err != nil {
+ return err
+ }
+ ans = append(ans, img)
+ return nil
+ })
+ return ans, err
+}
+
+func (i *APIImpl) uploadWithChunk(ctx context.Context, img *types.Image, force bool) error {
+ ck := &types.ChunkSlice{
+ Image: *img,
+ ChunkSize: i.chunkSize,
+ }
+ if err := i.StartUploadImageChunk(ctx, ck, force); err != nil {
+ return err
+ }
+
+ nChunks := int64(math.Ceil(float64(ck.Size) / float64(ck.ChunkSize)))
+ retries := nChunks
+ success := 0
+ resCh := make(chan *execResult, nChunks)
+ defer close(resCh)
+
+ // use a goroutine pool with a capacity of 10 workers
+ p, _ := ants.NewPoolWithFunc(10, func(idx any) {
+ cIdx, _ := idx.(int64)
+ err := i.UploadImageChunk(ctx, ck, cIdx)
+ resCh <- &execResult{cIdx, err}
+ })
+ defer p.Release()
+
+ // Submit tasks one by one.
+ for idx := int64(0); idx < nChunks; idx++ {
+ _ = p.Invoke(idx)
+ }
+ for res := range resCh {
+ if res.err == nil {
+ success++
+ if success == int(nChunks) {
+ break
+ }
+ continue
+ }
+ if retries > 0 {
+ _ = p.Invoke(res.chunkIdx)
+ retries--
+ } else {
+ return res.err
+ }
+ }
+ return i.MergeChunk(ctx, ck.UploadID)
+}
+
+func (i *APIImpl) startUpload(ctx context.Context, img *types.Image, force bool) (uploadID string, err error) {
+ reqURL := fmt.Sprintf("%s/api/v1/image/%s/%s/startUpload", i.ServerURL, img.Username, img.Name)
+
+ metadata, err := img.LoadLocalMetadata()
+ if err != nil {
+ return "", err
+ }
+ var digest string
+ if metadata != nil {
+ digest = metadata.Digest
+ }
+
+ u, _ := url.Parse(reqURL)
+ body := &svctypes.ImageCreateRequest{
+ Username: img.Username,
+ Name: img.Name,
+ Tag: img.Tag,
+ Size: img.Size,
+ Private: img.Private,
+ Digest: digest,
+ Format: img.Format,
+ OS: img.OS,
+ Description: img.Description,
+ URL: img.URL, // just used for passing remote file when pushing
+ }
+ query := u.Query()
+ query.Add("force", strconv.FormatBool(force))
+ u.RawQuery = query.Encode()
+
+ bodyBytes, err := json.Marshal(body)
+ if err != nil {
+ return "", err
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewReader(bodyBytes))
+ if err != nil {
+ return "", err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ _ = i.AddAuth(req)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to execute http request")
+ }
+ defer resp.Body.Close()
+
+ data, err := util.GetRespData(resp)
+ if err != nil {
+ return "", err
+ }
+ obj := map[string]string{}
+ err = json.Unmarshal(data, &obj)
+ if err != nil {
+ return "", err
+ }
+ return obj["uploadID"], nil
+}
+
+func (i *APIImpl) upload(ctx context.Context, img *types.Image, uploadID string) (err error) {
+ reqURL := fmt.Sprintf("%s/api/v1/image/%s/%s/upload", i.ServerURL, img.Username, img.Name)
+
+ filePath := img.Filepath()
+ fp, err := os.Open(filePath)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open %s", filePath)
+ }
+ defer fp.Close()
+
+ r, w := io.Pipe()
+ m := multipart.NewWriter(w)
+ errCh := make(chan error, 1)
+ go func() {
+ defer w.Close()
+ defer m.Close()
+
+ part, err := m.CreateFormFile("file", filepath.Base(filePath))
+ if err != nil {
+ errCh <- errors.Wrap(err, "failed to create form field")
+ return
+ }
+ if _, err = io.Copy(part, fp); err != nil {
+ errCh <- errors.Wrap(err, "failed copy content from file to part")
+ return
+ }
+ }()
+
+ u, _ := url.Parse(reqURL)
+ query := u.Query()
+ query.Add("uploadID", uploadID)
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), r)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create http request")
+ }
+
+ req.Header.Set("Content-Type", m.FormDataContentType())
+ _ = i.AddAuth(req)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return errors.Wrap(err, "failed to execute http request")
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ bs, _ := io.ReadAll(resp.Body)
+ select {
+ case err = <-errCh:
+ default:
+ }
+ if err != nil {
+ return errors.Wrapf(err, "failed to push: %s, %s", resp.Status, string(bs))
+ } else { //nolint:revive
+ return errors.Newf("failed to push: %s, %s", resp.Status, string(bs))
+ }
+ }
+ return nil
+}
+
+func (i *APIImpl) uploadSingle(ctx context.Context, img *types.Image, force bool) (err error) {
+ remoteUpload := img.URL != ""
+ uploadID, err := i.startUpload(ctx, img, force)
+ if err != nil {
+ return err
+ }
+ if !remoteUpload {
+ err = i.upload(ctx, img, uploadID)
+ }
+ return
+}
+
+func (i *APIImpl) Push(ctx context.Context, img *types.Image, force bool) error {
+ var (
+ size int64
+ err error
+ )
+ if img.URL == "" {
+ size, err = util.GetFileSize(img.Filepath())
+ if err != nil {
+ return err
+ }
+ }
+ img.Size = size
+ if size > i.threshold {
+ err = i.uploadWithChunk(ctx, img, force)
+ } else {
+ err = i.uploadSingle(ctx, img, force)
+ }
+ return err
+}
+
+func (i *APIImpl) GetInfo(ctx context.Context, imgFullname string) (info *types.Image, err error) {
+ username, name, tag, err := svcutils.ParseImageName(imgFullname)
+ if err != nil {
+ return nil, err
+ }
+ reqURL := fmt.Sprintf(`%s/api/v1/image/%s/%s/info`, i.ServerURL, username, name)
+
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return nil, err
+ }
+ query := u.Query()
+ query.Add("tag", tag)
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ _ = i.AddAuth(req)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ case http.StatusNotFound:
+ return nil, terrors.ErrImageNotFound
+ default:
+ return nil, errors.Wrapf(terrors.ErrHTTPError, "status: %d", resp.StatusCode)
+ }
+ bs, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ resRaw := map[string]any{}
+ err = json.Unmarshal(bs, &resRaw)
+ if err != nil {
+ return nil, err
+ }
+ dataObj, ok := resRaw["data"]
+ if !ok {
+ return nil, errors.Wrapf(terrors.ErrHTTPError, "response json object needs contain data field: %s", string(bs))
+ }
+ info = &types.Image{
+ BaseDir: i.baseDir,
+ MDB: i.mdb,
+ }
+ dataBs, _ := json.Marshal(dataObj)
+ if err = json.Unmarshal(dataBs, &info.ImageInfoResp); err != nil {
+ return nil, err
+ }
+ info.BaseDir = i.baseDir
+ if util.FileExists(info.Filepath()) {
+ info.ActualSize, info.VirtualSize, err = util.ImageSize(info.Filepath()) //nolint
+ }
+ return info, nil
+}
+
+func (i *APIImpl) Pull(ctx context.Context, imgName string, policy PullPolicy) (*types.Image, error) {
+ img, err := i.mdb.NewImage(imgName)
+ if err != nil {
+ return nil, err
+ }
+ switch policy {
+ case PullPolicyNever:
+ return nil, nil //nolint
+ case "":
+ if img.Tag == "latest" {
+ policy = PullPolicyAlways
+ } else {
+ policy = PullPolicyIfNotPresent
+ }
+ }
+ filePath := img.Filepath()
+ if (policy == PullPolicyIfNotPresent) && util.FileExists(filePath) {
+ meta, err := img.LoadLocalMetadata()
+ if err != nil {
+ return nil, err
+ }
+ img.Digest = meta.Digest
+ img.Size = meta.Size
+ return img, nil
+ }
+ // GetInfo can return a different image object when the tag is empty or "latest",
+ // so assign its result back to img here.
+ if img, err = i.GetInfo(ctx, img.Fullname()); err != nil {
+ return nil, err
+ }
+
+ if img.Format == "rbd" {
+ return nil, errors.Newf("image in rbd format is not alllowed to download")
+ }
+ if cached, _ := img.Cached(); cached {
+ return img, nil
+ }
+
+ // download image from server
+ if img.Size > i.threshold {
+ err = i.downloadWithChunk(ctx, img)
+ } else {
+ err = i.download(ctx, img)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // check digest again
+ if cached, err := img.Cached(); err != nil || (!cached) {
+ if err == nil {
+ err = terrors.ErrInvalidDigest
+ }
+ return nil, err
+ }
+
+ return img, nil
+}
+
+func (i *APIImpl) RemoveLocalImage(_ context.Context, img *types.Image) (err error) {
+ return i.mdb.RemoveImage(img)
+}
+
+func (i *APIImpl) RemoveImage(ctx context.Context, img *types.Image) (err error) {
+ if err := i.RemoveLocalImage(ctx, img); err != nil {
+ return err
+ }
+ reqURL := fmt.Sprintf("%s/api/v1/image/%s/%s", i.ServerURL, img.Username, img.Name)
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return
+ }
+ query := u.Query()
+ query.Add("tag", img.Tag)
+ u.RawQuery = query.Encode()
+ req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u.String(), nil)
+ if err != nil {
+ return
+ }
+ _ = i.AddAuth(req)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+ _, err = util.GetRespData(resp)
+ return err
+}
+
+type execResult struct {
+ chunkIdx int64
+ err error
+}
+
+func (i *APIImpl) downloadWithChunk(ctx context.Context, img *types.Image) (err error) {
+ ck := &types.ChunkSlice{
+ Image: *img,
+ ChunkSize: i.chunkSize,
+ }
+
+ nChunks := int64(math.Ceil(float64(ck.Size) / float64(ck.ChunkSize)))
+ resCh := make(chan *execResult, nChunks)
+ defer close(resCh)
+
+ // use a goroutine pool with a capacity of 10 workers
+ p, _ := ants.NewPoolWithFunc(10, func(idx any) {
+ cIdx, _ := idx.(int64)
+
+ backoffStrategy := backoff.NewExponentialBackOff()
+ // Use the Retry operation to perform the operation with exponential backoff
+ err := backoff.Retry(func() error {
+ return i.DownloadImageChunk(ctx, ck, cIdx)
+ }, backoff.WithContext(backoffStrategy, ctx))
+ resCh <- &execResult{cIdx, err}
+ })
+ defer p.Release()
+
+ // Submit tasks one by one.
+ for idx := int64(0); idx < nChunks; idx++ {
+ _ = p.Invoke(idx)
+ }
+ var (
+ downloadErr error
+ finished int
+ )
+ for res := range resCh {
+ finished++
+ if res.err != nil {
+ downloadErr = errors.CombineErrors(downloadErr, res.err)
+ }
+ if finished >= int(nChunks) {
+ break
+ }
+ }
+ if downloadErr != nil {
+ return downloadErr
+ }
+ if err := mergeSliceFile(ck, int(nChunks)); err != nil {
+ return err
+ }
+ if err := img.CopyFrom(ck.SliceFilePath()); err != nil {
+ return err
+ }
+ _ = os.Remove(ck.SliceFilePath())
+ return nil
+}
+
+func (i *APIImpl) download(ctx context.Context, img *types.Image) (err error) {
+ reqURL := fmt.Sprintf("%s/api/v1/image/%s/%s/download", i.ServerURL, img.Username, img.Name)
+
+ u, err := url.Parse(reqURL)
+ if err != nil {
+ return
+ }
+ query := u.Query()
+ query.Add("tag", img.Tag)
+
+ u.RawQuery = query.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+ if err != nil {
+ return err
+ }
+ _ = i.AddAuth(req)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ bs, _ := io.ReadAll(resp.Body)
+ return errors.Newf("failed to pull image, status code: %d, body: %s", resp.StatusCode, string(bs))
+ }
+ return i.mdb.CopyFile(img, resp.Body)
+}
diff --git a/client/image/image_test.go b/client/image/image_test.go
new file mode 100644
index 0000000..294de4c
--- /dev/null
+++ b/client/image/image_test.go
@@ -0,0 +1,128 @@
+package image
+
+import (
+ "github.com/projecteru2/vmihub/client/types"
+ svctypes "github.com/projecteru2/vmihub/pkg/types"
+)
+
+const (
+ baseDir = "/tmp/libvmihub-test"
+ testContent = "Test file contents"
+)
+
+var testImg = &types.Image{
+ ImageInfoResp: svctypes.ImageInfoResp{
+ Username: "test-user",
+ Name: "test-image",
+ Tag: "test-tag",
+ },
+
+ BaseDir: baseDir,
+}
+
+// func TestPullImage(t *testing.T) {
+// defer os.RemoveAll(baseDir)
+
+// // create a test server
+// server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// // simulate the Content-Disposition response header
+// w.Header().Set("Content-Disposition", "attachment; filename=test_image.tar")
+// // simulate the file content
+// w.Write([]byte(testContent))
+// }))
+// defer server.Close()
+
+// // call PullImage for the test
+// cred := &types.Credential{
+// Token: "testtoken",
+// }
+// // set test parameters
+// api, err := New(server.URL, baseDir, cred)
+// assert.Nil(t, err)
+
+// err = api.Pull(context.Background(), testImg)
+// assert.Nil(t, err)
+
+// // verify the generated local file exists
+// filename := testImg.Filepath()
+// assert.True(t, util.FileExists(filename))
+
+// // verify the content of the generated local file is correct
+// content, err := os.ReadFile(filename)
+// assert.Nil(t, err)
+
+// assert.Equal(t, testContent, string(content))
+
+// }
+
+//func TestListLocalImages(t *testing.T) {
+// defer os.RemoveAll(baseDir)
+//
+// cred := &types.Credential{
+// Token: "Bearer test-token",
+// }
+// api, err := NewAPI("", baseDir, cred)
+// assert.Nil(t, err)
+// imgNames := map[string]string{
+// "user1/img1:tag1": "user1/img1:tag1",
+// "user1/img2:tag1": "user1/img2:tag1",
+// "user2/img1:tag1": "user2/img1:tag1",
+// "img3:tag1": "img3:tag1",
+// "img4": "img4:latest",
+// }
+// expectVals := map[string]bool{}
+// for imgName, eVal := range imgNames {
+// img1, err := api.NewImage(imgName)
+// assert.Nil(t, err)
+// err = util.CreateQcow2File(img1.Filepath(), "qcow2", 1024*1024)
+// assert.Nil(t, err)
+// expectVals[eVal] = true
+// }
+//
+// imgs, err := api.ListLocalImages()
+// assert.Nil(t, err)
+// assert.Len(t, imgs, 5)
+// for _, img := range imgs {
+// _, ok := expectVals[img.Fullname()]
+// assert.Equal(t, int64(1024*1024), img.VirtualSize)
+// assert.Truef(t, ok, "%s", img.Fullname())
+// }
+//}
+
+//func TestGetToken(t *testing.T) {
+// // create a test server
+// server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// // simulate handling of the login request
+// var loginReq svctypes.LoginRequest
+// err := json.NewDecoder(r.Body).Decode(&loginReq)
+// assert.Nil(t, err)
+//
+// // return a token based on the username and password
+// if loginReq.Username == "testuser" && loginReq.Password == "testpassword" {
+// resp := svctypes.TokenResponse{
+// AccessToken: "testtoken",
+// }
+// jsonResp, err := json.Marshal(resp)
+// assert.Nil(t, err)
+//
+// w.Header().Set("Content-Type", "application/json")
+// w.WriteHeader(http.StatusOK)
+// w.Write(jsonResp)
+// } else {
+// w.WriteHeader(http.StatusUnauthorized)
+// }
+// }))
+// defer server.Close()
+//
+// // set test parameters
+// serverURL := server.URL
+// username := "testuser"
+// password := "testpassword"
+//
+// // call GetToken for the test
+// token, _, err := auth.GetToken(context.Background(), serverURL, username, password)
+// assert.Nil(t, err)
+//
+// expectedToken := "testtoken"
+// assert.Equal(t, expectedToken, token)
+//}
diff --git a/client/image/mocks/API.go b/client/image/mocks/API.go
new file mode 100644
index 0000000..6f7ffe0
--- /dev/null
+++ b/client/image/mocks/API.go
@@ -0,0 +1,242 @@
+// Code generated by mockery v2.42.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ image "github.com/projecteru2/vmihub/client/image"
+ mock "github.com/stretchr/testify/mock"
+
+ types "github.com/projecteru2/vmihub/client/types"
+)
+
+// API is an autogenerated mock type for the API type
+type API struct {
+ mock.Mock
+}
+
+// GetInfo provides a mock function with given fields: ctx, imgFullname
+func (_m *API) GetInfo(ctx context.Context, imgFullname string) (*types.Image, error) {
+ ret := _m.Called(ctx, imgFullname)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetInfo")
+ }
+
+ var r0 *types.Image
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (*types.Image, error)); ok {
+ return rf(ctx, imgFullname)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) *types.Image); ok {
+ r0 = rf(ctx, imgFullname)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*types.Image)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, imgFullname)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ListImages provides a mock function with given fields: ctx, user, pageN, pageSize
+func (_m *API) ListImages(ctx context.Context, user string, pageN int, pageSize int) ([]*types.Image, int, error) {
+ ret := _m.Called(ctx, user, pageN, pageSize)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListImages")
+ }
+
+ var r0 []*types.Image
+ var r1 int
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int, int) ([]*types.Image, int, error)); ok {
+ return rf(ctx, user, pageN, pageSize)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int, int) []*types.Image); ok {
+ r0 = rf(ctx, user, pageN, pageSize)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*types.Image)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int, int) int); ok {
+ r1 = rf(ctx, user, pageN, pageSize)
+ } else {
+ r1 = ret.Get(1).(int)
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, int, int) error); ok {
+ r2 = rf(ctx, user, pageN, pageSize)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// ListLocalImages provides a mock function with given fields:
+func (_m *API) ListLocalImages() ([]*types.Image, error) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListLocalImages")
+ }
+
+ var r0 []*types.Image
+ var r1 error
+ if rf, ok := ret.Get(0).(func() ([]*types.Image, error)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() []*types.Image); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*types.Image)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewImage provides a mock function with given fields: imgName
+func (_m *API) NewImage(imgName string) (*types.Image, error) {
+ ret := _m.Called(imgName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for NewImage")
+ }
+
+ var r0 *types.Image
+ var r1 error
+ if rf, ok := ret.Get(0).(func(string) (*types.Image, error)); ok {
+ return rf(imgName)
+ }
+ if rf, ok := ret.Get(0).(func(string) *types.Image); ok {
+ r0 = rf(imgName)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*types.Image)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(imgName)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Pull provides a mock function with given fields: ctx, imgName, policy
+func (_m *API) Pull(ctx context.Context, imgName string, policy image.PullPolicy) (*types.Image, error) {
+ ret := _m.Called(ctx, imgName, policy)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Pull")
+ }
+
+ var r0 *types.Image
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, image.PullPolicy) (*types.Image, error)); ok {
+ return rf(ctx, imgName, policy)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, image.PullPolicy) *types.Image); ok {
+ r0 = rf(ctx, imgName, policy)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*types.Image)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, image.PullPolicy) error); ok {
+ r1 = rf(ctx, imgName, policy)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Push provides a mock function with given fields: ctx, img, force
+func (_m *API) Push(ctx context.Context, img *types.Image, force bool) error {
+ ret := _m.Called(ctx, img, force)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Push")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, *types.Image, bool) error); ok {
+ r0 = rf(ctx, img, force)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// RemoveImage provides a mock function with given fields: ctx, img
+func (_m *API) RemoveImage(ctx context.Context, img *types.Image) error {
+ ret := _m.Called(ctx, img)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveImage")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, *types.Image) error); ok {
+ r0 = rf(ctx, img)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// RemoveLocalImage provides a mock function with given fields: ctx, img
+func (_m *API) RemoveLocalImage(ctx context.Context, img *types.Image) error {
+ ret := _m.Called(ctx, img)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveLocalImage")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, *types.Image) error); ok {
+ r0 = rf(ctx, img)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// NewAPI creates a new instance of API. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewAPI(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *API {
+ mock := &API{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/client/image/mocks/Option.go b/client/image/mocks/Option.go
new file mode 100644
index 0000000..ec5c62e
--- /dev/null
+++ b/client/image/mocks/Option.go
@@ -0,0 +1,32 @@
+// Code generated by mockery v2.42.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ image "github.com/projecteru2/vmihub/client/image"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// Option is an autogenerated mock type for the Option type
+type Option struct {
+ mock.Mock
+}
+
+// Execute provides a mock function with given fields: _a0
+func (_m *Option) Execute(_a0 *image.Options) {
+ _m.Called(_a0)
+}
+
+// NewOption creates a new instance of Option. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewOption(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *Option {
+ mock := &Option{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/client/image/option.go b/client/image/option.go
new file mode 100644
index 0000000..6564e30
--- /dev/null
+++ b/client/image/option.go
@@ -0,0 +1,29 @@
+package image
+
+type Options struct {
+ chunkSize string
+ threshold string
+}
+
+type Option func(*Options)
+
+func WithChunSize(sz string) Option {
+ return func(opts *Options) {
+ opts.chunkSize = sz
+ }
+}
+
+// WithChunkThreshold sets the size threshold; images bigger than it are uploaded and downloaded in chunks
+func WithChunkThreshold(sz string) Option {
+ return func(opts *Options) {
+ opts.threshold = sz
+ }
+}
+
+type PullPolicy string
+
+const (
+ PullPolicyAlways = "Always"
+ PullPolicyIfNotPresent = "IfNotPresent"
+ PullPolicyNever = "Never"
+)
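A usage sketch tying the options and pull policies together: construct the image API client with `NewAPI`, override the chunk size and chunk threshold, and pull an image. The server URL, cache directory, token, and image name are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/projecteru2/vmihub/client/image"
	"github.com/projecteru2/vmihub/client/types"
)

func main() {
	ctx := context.Background()
	// Server URL, cache directory, token and image name are examples only.
	cred := &types.Credential{Token: "my-access-token"}
	api, err := image.NewAPI("http://127.0.0.1:5000", "/tmp/vmihub-cache", cred,
		image.WithChunSize("64M"),        // chunk size used for chunked transfers
		image.WithChunkThreshold("512M"), // images larger than this use chunked transfer
	)
	if err != nil {
		log.Fatal(err)
	}

	// With an empty policy, Pull defaults to Always for the "latest" tag and
	// IfNotPresent otherwise (see Pull in client/image/image.go).
	img, err := api.Pull(ctx, "someuser/ubuntu:22.04", image.PullPolicyIfNotPresent)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("image cached at:", img.Filepath())
}
```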
diff --git a/client/terrors/errors.go b/client/terrors/errors.go
new file mode 100644
index 0000000..001a8a4
--- /dev/null
+++ b/client/terrors/errors.go
@@ -0,0 +1,15 @@
+package terrors
+
+import "github.com/cockroachdb/errors"
+
+var (
+ ErrInvalidImageName = errors.New("invalid image name")
+ ErrFSError = errors.New("filesystem error")
+ ErrNetworkError = errors.New("network error")
+ ErrHTTPError = errors.New("http error")
+ ErrInvalidHash = errors.New("invalid hash type")
+ ErrInvalidDigest = errors.New("invalid digest")
+ ErrImageNotFound = errors.New("image not found")
+
+ ErrPlaceholder = errors.New("placeholder error")
+)
diff --git a/client/types/image.go b/client/types/image.go
new file mode 100644
index 0000000..c6f6733
--- /dev/null
+++ b/client/types/image.go
@@ -0,0 +1,241 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/projecteru2/vmihub/client/util"
+ svctypes "github.com/projecteru2/vmihub/pkg/types"
+ svcutils "github.com/projecteru2/vmihub/pkg/utils"
+ bolt "go.etcd.io/bbolt"
+)
+
+type Metadata struct {
+ Digest string `mapstructure:"digest" json:"digest"`
+ Size int64 `mapstructure:"size" json:"size"`
+ ActualSize int64 `mapstructure:"actual_size" json:"actualSize"`
+ VirtualSize int64 `mapstructure:"virtual_size" json:"virtualSize"`
+}
+
+type MetadataDB struct {
+ baseDir string
+ bucket string
+ db *bolt.DB
+}
+
+func NewMetadataDB(baseDir, bucket string) (*MetadataDB, error) {
+ db, err := bolt.Open(filepath.Join(baseDir, "metadata.db"), 0600, &bolt.Options{Timeout: 5 * time.Second})
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to open db")
+ }
+ if err := db.Update(func(tx *bolt.Tx) error {
+ _, err := tx.CreateBucketIfNotExists([]byte(bucket))
+ return err
+ }); err != nil {
+ return nil, errors.Wrapf(err, "failed to create bucket for image")
+ }
+ return &MetadataDB{
+ baseDir: baseDir,
+ bucket: bucket,
+ db: db,
+ }, nil
+}
+
+func (mdb *MetadataDB) Remove(img *Image) error {
+ return mdb.db.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte(mdb.bucket))
+ return b.Delete([]byte(img.Fullname()))
+ })
+}
+
+func (mdb *MetadataDB) NewImage(imgName string) (img *Image, err error) {
+ user, name, tag, err := svcutils.ParseImageName(imgName)
+ if err != nil {
+ return nil, err
+ }
+ img = &Image{
+ ImageInfoResp: svctypes.ImageInfoResp{
+ Username: user,
+ Name: name,
+ Tag: tag,
+ },
+ BaseDir: mdb.baseDir,
+ MDB: mdb,
+ }
+ md, err := mdb.Load(img)
+ if err != nil {
+ return
+ }
+ if md == nil {
+ return img, nil
+ }
+ img.ActualSize, img.VirtualSize = md.ActualSize, md.VirtualSize
+ img.Size = md.Size
+ img.Digest = md.Digest
+ return img, nil
+}
+
+func (mdb *MetadataDB) RemoveImage(img *Image) error {
+ if err := mdb.Remove(img); err != nil {
+ return err
+ }
+ err := os.RemoveAll(img.Filepath())
+ if err == nil || os.IsNotExist(err) {
+ return nil
+ }
+ return err
+}
+
+func (mdb *MetadataDB) CopyFile(img *Image, src io.Reader) (err error) {
+ if err := util.EnsureDir(filepath.Dir(img.Filepath())); err != nil {
+ return err
+ }
+ destF, err := os.OpenFile(img.Filepath(), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0766)
+ if err != nil {
+ return err
+ }
+ if _, err = io.Copy(destF, src); err != nil {
+ return err
+ }
+ destF.Close()
+
+ md, err := mdb.update(img, false)
+ if err != nil {
+ return err
+ }
+ img.ActualSize, img.VirtualSize = md.ActualSize, md.VirtualSize
+ img.Digest = md.Digest
+ return err
+}
+
+// Before calling this method, you should ensure the local image file exists.
+func (mdb *MetadataDB) Load(img *Image) (meta *Metadata, err error) {
+ fullname := img.Fullname()
+ localfile := img.Filepath()
+ fi, err := os.Stat(localfile)
+ if err != nil {
+ return nil, nil //nolint
+ }
+ meta = &Metadata{
+ Size: fi.Size(),
+ }
+ var exists bool
+ err = mdb.db.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte(mdb.bucket))
+ v := b.Get([]byte(fullname))
+ if v != nil {
+ exists = true
+ return json.Unmarshal(v, meta)
+ }
+ return nil
+ })
+ if exists {
+ return meta, err
+ }
+ return mdb.update(img, true)
+}
+
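+// update recomputes the digest and sizes from the local file and stores them.
+// When oldEmpty is true it refuses to overwrite an entry created concurrently.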
+func (mdb *MetadataDB) update(img *Image, oldEmpty bool) (meta *Metadata, err error) {
+ fullname := img.Fullname()
+ localfile := img.Filepath()
+ fi, err := os.Stat(localfile)
+ if err != nil {
+ return nil, nil //nolint
+ }
+ meta = &Metadata{
+ Size: fi.Size(),
+ }
+ if meta.Digest, err = svcutils.CalcDigestOfFile(localfile); err != nil {
+ return nil, err
+ }
+ if meta.ActualSize, meta.VirtualSize, err = util.ImageSize(localfile); err != nil {
+ return nil, err
+ }
+
+ bs, err := json.Marshal(*meta)
+ if err != nil {
+ return nil, err
+ }
+ err = mdb.db.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte(mdb.bucket))
+ if oldEmpty && b.Get([]byte(fullname)) != nil {
+ return errors.Newf("conflict when loading metadata of image %s", fullname)
+ }
+ return b.Put([]byte(fullname), bs)
+ })
+ return
+}
+
+type OSInfo = svctypes.OSInfo
+
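+// Image combines the server-side image information with local details such as
+// the cache base directory and the sizes computed from the on-disk file.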
+type Image struct {
+ svctypes.ImageInfoResp
+
+ BaseDir string `mapstructure:"-" json:"-"`
+ URL string `mapstructure:"-" json:"-"`
+ VirtualSize int64 `mapstructure:"-" json:"-"`
+ ActualSize int64 `mapstructure:"-" json:"-"`
+ MDB *MetadataDB `mapstructure:"-" json:"-"`
+}
+
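+// CopyFrom copies the file at fname into the image's local path; it is a no-op
+// when fname already is that path.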
+func (img *Image) CopyFrom(fname string) error {
+ if fname == img.Filepath() {
+ return nil
+ }
+ srcF, err := os.Open(fname)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ return img.MDB.CopyFile(img, srcF)
+}
+
+// LoadLocalMetadata returns the metadata recorded for the image. Before
+// calling this method, you should ensure the local image file exists.
+func (img *Image) LoadLocalMetadata() (meta *Metadata, err error) {
+ return img.MDB.Load(img)
+}
+
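+// Filepath returns the local path of the image file, i.e.
+// <BaseDir>/image/<username>/<name>:<tag>.img ("_" is used for an empty username).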
+func (img *Image) Filepath() string {
+ user := img.Username
+ if user == "" {
+ user = "_"
+ }
+ return filepath.Join(img.BaseDir, "image", fmt.Sprintf("%s/%s:%s.img", user, img.Name, img.Tag))
+}
+
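+// Cached reports whether a local copy exists whose digest matches the image digest.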
+func (img *Image) Cached() (ans bool, err error) {
+ meta, err := img.MDB.Load(img)
+ if err != nil || meta == nil {
+ return false, err
+ }
+ return img.Digest == meta.Digest, nil
+}
+
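+// ChunkSlice describes one chunked (multipart) upload or download session of an image.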
+type ChunkSlice struct {
+ Image
+
+ UploadID string `mapstructure:"uploadId" json:"uploadId"`
+ ChunkSize int64 `mapstructure:"chunkSize" json:"chunkSize"`
+}
+
+func (chunk *ChunkSlice) SliceFilePath() string {
+ user := chunk.Username
+ if user == "" {
+ user = "_"
+ }
+ return filepath.Join(chunk.BaseDir, "image", fmt.Sprintf("%s/__slice_%s:%s.img", user, chunk.Name, chunk.Tag))
+}
+
+func (chunk *ChunkSlice) SliceFileIndexPath(idx int) string {
+ user := chunk.Username
+ if user == "" {
+ user = "_"
+ }
+ return filepath.Join(chunk.BaseDir, "image", fmt.Sprintf("%s/__slice_%s:%s-%d.img", user, chunk.Name, chunk.Tag, idx))
+}
diff --git a/client/types/user.go b/client/types/user.go
new file mode 100644
index 0000000..e93d056
--- /dev/null
+++ b/client/types/user.go
@@ -0,0 +1,10 @@
+package types
+
+import "time"
+
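+// Credential holds the username/password and the cached token the client uses
+// to authenticate against the vmihub server.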
+type Credential struct {
+ Username string `toml:"username" json:"username"`
+ Password string `toml:"password" json:"password"`
+ Token string `toml:"token" json:"token"`
+ TokenCreatedAt time.Time `toml:"token_created_at" json:"tokenCreatedAt"`
+}
diff --git a/client/util/utils.go b/client/util/utils.go
new file mode 100644
index 0000000..9fbf7a7
--- /dev/null
+++ b/client/util/utils.go
@@ -0,0 +1,131 @@
+package util
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+
+ "github.com/cockroachdb/errors"
+ "github.com/projecteru2/vmihub/client/terrors"
+)
+
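+// EnsureDir creates dirPath (and any missing parents) if it does not exist yet.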
+func EnsureDir(dirPath string) error {
+ // check whether the directory already exists
+ _, err := os.Stat(dirPath)
+ if os.IsNotExist(err) {
+ // the directory does not exist, create it
+ err := os.MkdirAll(dirPath, 0755)
+ if err != nil {
+ return err
+ }
+ } else if err != nil {
+ // some other stat error
+ return err
+ }
+
+ return nil
+}
+
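+// FileExists reports whether filename exists and is not a directory.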
+func FileExists(filename string) bool {
+ info, err := os.Stat(filename)
+ if err != nil {
+ return false
+ }
+ return !info.IsDir()
+}
+
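+// ImageSize runs "qemu-img info --output=json" on fname and returns the actual
+// and virtual sizes in bytes; qemu-img must be available on PATH.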
+func ImageSize(fname string) (int64, int64, error) {
+ cmdArgs := []string{"qemu-img", "info", "--output=json", fname}
+ cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) //nolint:gosec
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ err := cmd.Run()
+ if err != nil {
+ return 0, 0, errors.Wrap(err, stderr.String())
+ }
+ res := map[string]any{}
+ err = json.Unmarshal(stdout.Bytes(), &res)
+ if err != nil {
+ return 0, 0, errors.Wrapf(err, "failed to unmarshal json: %s", stdout.String())
+ }
+ virtualSize, vOK := res["virtual-size"].(float64)
+ actualSize, aOK := res["actual-size"].(float64)
+ if !vOK || !aOK {
+ return 0, 0, errors.Newf("unexpected qemu-img info output: %s", stdout.String())
+ }
+ return int64(actualSize), int64(virtualSize), nil
+}
+
+func GetFileSize(filepath string) (int64, error) {
+ fi, err := os.Stat(filepath)
+ if err != nil {
+ return 0, err
+ }
+ return fi.Size(), nil
+}
+
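+// CreateQcow2File creates a disk image of the given format and capacity (in
+// bytes) using "qemu-img create".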
+func CreateQcow2File(fname string, format string, capacity int64) error {
+ if err := EnsureDir(filepath.Dir(fname)); err != nil {
+ return err
+ }
+
+ cmd := exec.Command("qemu-img", "create", "-q", "-f", format, fname, strconv.FormatInt(capacity, 10)) //nolint:gosec
+ bs, err := cmd.CombinedOutput()
+ return errors.Wrapf(err, "failed to create qemu image: %s", string(bs))
+}
+
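+// Copy copies src to dest, creating dest's parent directory when needed.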
+func Copy(src, dest string) error {
+ srcF, err := os.OpenFile(src, os.O_RDONLY, 0766)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open %s", src)
+ }
+ defer srcF.Close()
+
+ if err := EnsureDir(filepath.Dir(dest)); err != nil {
+ return errors.Wrapf(err, "failed to create dir for %s", dest)
+ }
+ destF, err := os.OpenFile(dest, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0766)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open %s", dest)
+ }
+ defer destF.Close()
+
+ if _, err = io.Copy(destF, srcF); err != nil {
+ return err
+ }
+ return nil
+}
+
+func Move(src, dest string) error {
+ if err := Copy(src, dest); err != nil {
+ return err
+ }
+ // a move is a copy followed by removing the source file
+ return os.Remove(src)
+}
+
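+// GetRespData decodes the JSON envelope returned by the server and returns the
+// raw "data" payload re-marshalled as JSON; a non-200 status is reported as ErrHTTPError.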
+func GetRespData(resp *http.Response) (data []byte, err error) {
+ bs, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ resRaw := map[string]any{}
+ err = json.Unmarshal(bs, &resRaw)
+ if err != nil {
+ err = errors.Wrapf(err, "failed to decode response: %s", string(bs))
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ err = errors.Wrapf(terrors.ErrHTTPError, "status: %d, error: %v", resp.StatusCode, resRaw["error"])
+ return
+ }
+ val, ok := resRaw["data"]
+ if !ok {
+ return nil, nil
+ }
+ data, err = json.Marshal(val)
+ return
+}
diff --git a/cmd/vmihub/docs/docs.go b/cmd/vmihub/docs/docs.go
new file mode 100644
index 0000000..4a61f5b
--- /dev/null
+++ b/cmd/vmihub/docs/docs.go
@@ -0,0 +1,1529 @@
+// Code generated by swaggo/swag. DO NOT EDIT.
+
+package docs
+
+import "github.com/swaggo/swag"
+
+const docTemplate = `{
+ "schemes": {{ marshal .Schemes }},
+ "swagger": "2.0",
+ "info": {
+ "description": "{{escape .Description}}",
+ "title": "{{.Title}}",
+ "contact": {},
+ "version": "{{.Version}}"
+ },
+ "host": "{{.Host}}",
+ "basePath": "{{.BasePath}}",
+ "paths": {
+ "/image/:username/:name/startChunkUpload": {
+ "post": {
+ "description": "UploadImageChunk upload image chunk",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image chunk",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "镜像名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "boolean",
+ "description": "强制上传(覆盖)",
+ "name": "force",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "chunk大小",
+ "name": "chunkSize",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "chunk数量",
+ "name": "nChunks",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/:username/:name/startUpload": {
+ "post": {
+ "description": "StartUpload upload image file",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image file",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "镜像名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "boolean",
+ "description": "强制上传(覆盖)",
+ "name": "force",
+ "in": "query"
+ },
+ {
+ "description": "镜像配置",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.ImageCreateRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/chunk/merge": {
+ "post": {
+ "description": "MergeChunk merge chunk slice file",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "merge chunk slice file",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "上传uploadID",
+ "name": "uploadID",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/chunk/{chunkIdx}/upload": {
+ "post": {
+ "description": "UploadImageChunk upload image chunk",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image chunk",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "分片序列",
+ "name": "chunkIdx",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "上传uploadID",
+ "name": "uploadID",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "file",
+ "description": "文件",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}": {
+ "delete": {
+ "description": "DeleteImage delete image",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "delete image",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "镜像标签",
+ "name": "tag",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/chunk/{chunkIdx}/download": {
+ "get": {
+ "description": "DownloadImageChunk download image chunk",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "download image chunk",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "分片序号",
+ "name": "chunkIdx",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "标签",
+ "name": "tag",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "\"50M\"",
+ "description": "分片大小",
+ "name": "chunkSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/download": {
+ "get": {
+ "description": "DownloadImage download image",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "download image",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "镜像标签",
+ "name": "tag",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/info": {
+ "get": {
+ "description": "GetImageInfo get image meta info",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get image meta info",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "镜像标签",
+ "name": "tag",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/upload": {
+ "post": {
+ "description": "UploadImage upload image",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "boolean",
+ "description": "强制上传(覆盖)",
+ "name": "force",
+ "in": "query"
+ },
+ {
+ "type": "file",
+ "description": "文件",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/images": {
+ "get": {
+ "description": "ListImages get image list",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get image list",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "",
+ "description": "搜索关键字",
+ "name": "keyword",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 1,
+ "description": "页码",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 10,
+ "description": "每一页数量",
+ "name": "pageSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/types.ImageInfoResp"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/repositories": {
+ "get": {
+ "description": "ListRepositories get repository list",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get repository list",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 1,
+ "description": "页码",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 10,
+ "description": "每一页数量",
+ "name": "pageSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/repository/{username}/{name}": {
+ "get": {
+ "description": "ListRepoImages get image list of specified repo",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get image list of specified repository",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ },
+ "delete": {
+ "description": "DeleteRepository",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "delete specified repository",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/user/changePwd": {
+ "post": {
+ "description": "ChangePwd change user password",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "change user password",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "修改密码",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.ChangeUserPwdRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/info": {
+ "get": {
+ "description": "GetUserInfo get user info",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "get user info",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ "post": {
+ "description": "UpdateUser updates user information",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "update user information",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "重置密码",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.UpdateUserRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/login": {
+ "post": {
+ "description": "LoginUser login user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "parameters": [
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.LoginRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "$ref": "#/definitions/types.JSONResult"
+ }
+ }
+ }
+ }
+ },
+ "/user/logout": {
+ "post": {
+ "description": "LogoutUser logout user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "$ref": "#/definitions/types.JSONResult"
+ }
+ }
+ }
+ }
+ },
+ "/user/privateToken": {
+ "post": {
+ "description": "CreatePrivateToken create a private token for current user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "create private token",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.PrivateTokenRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/models.PrivateToken"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ "delete": {
+ "description": "DeletePrivateToken delete a private token for current user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "delete private token",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.PrivateTokenDeleteRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "msg": {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/privateTokens": {
+ "get": {
+ "description": "ListPrivateToken list all private tokens of current user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "list private token",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/models.PrivateToken"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/refreshToken": {
+ "post": {
+ "description": "RefreshToken refresh token",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "refresh token",
+ "parameters": [
+ {
+ "description": "刷新Token结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.RefreshRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.TokenResponse"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/resetPwd": {
+ "post": {
+ "description": "ResetPwd reset user password",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "reset user password",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "重置密码",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.ResetUserPwdRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/token": {
+ "post": {
+ "description": "GetUserToken get user token",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "parameters": [
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.LoginRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.TokenResponse"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "models.PrivateToken": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "expiredAt": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "lastUsed": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "token": {
+ "type": "string"
+ },
+ "userId": {
+ "type": "integer"
+ }
+ }
+ },
+ "pkg_types.OSInfo": {
+ "type": "object",
+ "properties": {
+ "arch": {
+ "type": "string",
+ "default": "amd64"
+ },
+ "distrib": {
+ "type": "string",
+ "default": "ubuntu"
+ },
+ "type": {
+ "type": "string",
+ "default": "linux"
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ },
+ "types.ChangeUserPwdRequest": {
+ "type": "object",
+ "required": [
+ "newPassword"
+ ],
+ "properties": {
+ "newPassword": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 3
+ }
+ }
+ },
+ "types.ImageCreateRequest": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "digest": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "labels": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "os": {
+ "$ref": "#/definitions/pkg_types.OSInfo"
+ },
+ "private": {
+ "type": "boolean",
+ "default": false
+ },
+ "region_code": {
+ "type": "string",
+ "default": "ap-yichang-1"
+ },
+ "size": {
+ "type": "integer"
+ },
+ "tag": {
+ "type": "string",
+ "default": "latest"
+ },
+ "url": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "types.ImageInfoResp": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string",
+ "example": "format: RFC3339"
+ },
+ "description": {
+ "type": "string"
+ },
+ "digest": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "name": {
+ "type": "string"
+ },
+ "os": {
+ "$ref": "#/definitions/pkg_types.OSInfo"
+ },
+ "private": {
+ "type": "boolean"
+ },
+ "repo_id": {
+ "type": "integer"
+ },
+ "size": {
+ "type": "integer"
+ },
+ "snapshot": {
+ "type": "string"
+ },
+ "tag": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string",
+ "example": "format: RFC3339"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "types.JSONResult": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "integer"
+ },
+ "data": {},
+ "msg": {
+ "type": "string"
+ }
+ }
+ },
+ "types.LoginRequest": {
+ "type": "object",
+ "properties": {
+ "password": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "types.PrivateTokenDeleteRequest": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1
+ }
+ }
+ },
+ "types.PrivateTokenRequest": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "expiredAt": {
+ "type": "string",
+ "example": "RFC3339: 2023-11-30T14:30:00.123+08:00"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1,
+ "example": "my-token"
+ }
+ }
+ },
+ "types.RefreshRequest": {
+ "type": "object",
+ "required": [
+ "accessToken",
+ "refreshToken"
+ ],
+ "properties": {
+ "accessToken": {
+ "type": "string"
+ },
+ "refreshToken": {
+ "type": "string"
+ }
+ }
+ },
+ "types.ResetUserPwdRequest": {
+ "type": "object",
+ "required": [
+ "code",
+ "password",
+ "password1",
+ "phone",
+ "smsId"
+ ],
+ "properties": {
+ "code": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 3
+ },
+ "password1": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 3
+ },
+ "phone": {
+ "type": "string"
+ },
+ "smsId": {
+ "type": "string"
+ }
+ }
+ },
+ "types.TokenResponse": {
+ "type": "object",
+ "properties": {
+ "accessToken": {
+ "type": "string"
+ },
+ "refreshToken": {
+ "type": "string"
+ }
+ }
+ },
+ "types.UpdateUserRequest": {
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "nickname": {
+ "type": "string"
+ }
+ }
+ },
+ "types.UserInfoResp": {
+ "type": "object",
+ "required": [
+ "email",
+ "nickname",
+ "username"
+ ],
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "isAdmin": {
+ "type": "boolean"
+ },
+ "nickname": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1
+ },
+ "type": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1
+ }
+ }
+ }
+ }
+}`
+
+// SwaggerInfo holds exported Swagger Info so clients can modify it
+var SwaggerInfo = &swag.Spec{
+ Version: "1.0",
+ Host: "",
+ BasePath: "/api/v1",
+ Schemes: []string{},
+ Title: "vmihub project",
+ Description: "this is vmihub server.",
+ InfoInstanceName: "swagger",
+ SwaggerTemplate: docTemplate,
+}
+
+func init() {
+ swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo)
+}
diff --git a/cmd/vmihub/docs/swagger.json b/cmd/vmihub/docs/swagger.json
new file mode 100644
index 0000000..0a99fc5
--- /dev/null
+++ b/cmd/vmihub/docs/swagger.json
@@ -0,0 +1,1505 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "description": "this is vmihub server.",
+ "title": "vmihub project",
+ "contact": {},
+ "version": "1.0"
+ },
+ "basePath": "/api/v1",
+ "paths": {
+ "/image/:username/:name/startChunkUpload": {
+ "post": {
+ "description": "UploadImageChunk upload image chunk",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image chunk",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "镜像名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "boolean",
+ "description": "强制上传(覆盖)",
+ "name": "force",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "chunk大小",
+ "name": "chunkSize",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "chunk数量",
+ "name": "nChunks",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/:username/:name/startUpload": {
+ "post": {
+ "description": "StartUpload upload image file",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image file",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "镜像名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "boolean",
+ "description": "强制上传(覆盖)",
+ "name": "force",
+ "in": "query"
+ },
+ {
+ "description": "镜像配置",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.ImageCreateRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/chunk/merge": {
+ "post": {
+ "description": "MergeChunk merge chunk slice file",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "merge chunk slice file",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "上传uploadID",
+ "name": "uploadID",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/chunk/{chunkIdx}/upload": {
+ "post": {
+ "description": "UploadImageChunk upload image chunk",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image chunk",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "分片序列",
+ "name": "chunkIdx",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "上传uploadID",
+ "name": "uploadID",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "file",
+ "description": "文件",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}": {
+ "delete": {
+ "description": "DeleteImage delete image",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "delete image",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "镜像标签",
+ "name": "tag",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/chunk/{chunkIdx}/download": {
+ "get": {
+ "description": "DownloadImageChunk download image chunk",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "download image chunk",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "integer",
+ "description": "分片序号",
+ "name": "chunkIdx",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "标签",
+ "name": "tag",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "\"50M\"",
+ "description": "分片大小",
+ "name": "chunkSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/download": {
+ "get": {
+ "description": "DownloadImage download image",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "download image",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "镜像标签",
+ "name": "tag",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/info": {
+ "get": {
+ "description": "GetImageInfo get image meta info",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get image meta info",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "\"latest\"",
+ "description": "镜像标签",
+ "name": "tag",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/image/{username}/{name}/upload": {
+ "post": {
+ "description": "UploadImage upload image",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "upload image",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "boolean",
+ "description": "强制上传(覆盖)",
+ "name": "force",
+ "in": "query"
+ },
+ {
+ "type": "file",
+ "description": "文件",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/images": {
+ "get": {
+ "description": "ListImages get image list",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get image list",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "",
+ "description": "搜索关键字",
+ "name": "keyword",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 1,
+ "description": "页码",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 10,
+ "description": "每一页数量",
+ "name": "pageSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/types.ImageInfoResp"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/repositories": {
+ "get": {
+ "description": "ListRepositories get repository list",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get repository list",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 1,
+ "description": "页码",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 10,
+ "description": "每一页数量",
+ "name": "pageSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/repository/{username}/{name}": {
+ "get": {
+ "description": "ListRepoImages get image list of specified repo",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "get image list of specified repository",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ },
+ "delete": {
+ "description": "DeleteRepository",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "镜像管理"
+ ],
+ "summary": "delete specified repository",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "用户名",
+ "name": "username",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "仓库名",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/user/changePwd": {
+ "post": {
+ "description": "ChangePwd change user password",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "change user password",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "修改密码",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.ChangeUserPwdRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/info": {
+ "get": {
+ "description": "GetUserInfo get user info",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "get user info",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ "post": {
+ "description": "UpdateUser updates user information",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "update user information",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "重置密码",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.UpdateUserRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/login": {
+ "post": {
+ "description": "LoginUser login user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "parameters": [
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.LoginRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "$ref": "#/definitions/types.JSONResult"
+ }
+ }
+ }
+ }
+ },
+ "/user/logout": {
+ "post": {
+ "description": "LogoutUser logout user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "$ref": "#/definitions/types.JSONResult"
+ }
+ }
+ }
+ }
+ },
+ "/user/privateToken": {
+ "post": {
+ "description": "CreatePrivateToken create a private token for current user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "create private token",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.PrivateTokenRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/models.PrivateToken"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ "delete": {
+ "description": "DeletePrivateToken delete a private token for current user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "delete private token",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.PrivateTokenDeleteRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "msg": {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/privateTokens": {
+ "get": {
+ "description": "ListPrivateToken list all private tokens of current user",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "list private token",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/models.PrivateToken"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/refreshToken": {
+ "post": {
+ "description": "RefreshToken refresh token",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "refresh token",
+ "parameters": [
+ {
+ "description": "刷新Token结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.RefreshRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.TokenResponse"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/resetPwd": {
+ "post": {
+ "description": "ResetPwd reset user password",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "summary": "reset user password",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "token",
+ "name": "Authorization",
+ "in": "header",
+ "required": true
+ },
+ {
+ "description": "重置密码",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.ResetUserPwdRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.UserInfoResp"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "/user/token": {
+ "post": {
+ "description": "GetUserToken get user token",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "用户管理"
+ ],
+ "parameters": [
+ {
+ "description": "用户结构体",
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/types.LoginRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "desc",
+ "schema": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/types.JSONResult"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/types.TokenResponse"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "models.PrivateToken": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string"
+ },
+ "expiredAt": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "lastUsed": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "token": {
+ "type": "string"
+ },
+ "userId": {
+ "type": "integer"
+ }
+ }
+ },
+ "pkg_types.OSInfo": {
+ "type": "object",
+ "properties": {
+ "arch": {
+ "type": "string",
+ "default": "amd64"
+ },
+ "distrib": {
+ "type": "string",
+ "default": "ubuntu"
+ },
+ "type": {
+ "type": "string",
+ "default": "linux"
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ },
+ "types.ChangeUserPwdRequest": {
+ "type": "object",
+ "required": [
+ "newPassword"
+ ],
+ "properties": {
+ "newPassword": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 3
+ }
+ }
+ },
+ "types.ImageCreateRequest": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "digest": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "labels": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "os": {
+ "$ref": "#/definitions/pkg_types.OSInfo"
+ },
+ "private": {
+ "type": "boolean",
+ "default": false
+ },
+ "region_code": {
+ "type": "string",
+ "default": "ap-yichang-1"
+ },
+ "size": {
+ "type": "integer"
+ },
+ "tag": {
+ "type": "string",
+ "default": "latest"
+ },
+ "url": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "types.ImageInfoResp": {
+ "type": "object",
+ "properties": {
+ "createdAt": {
+ "type": "string",
+ "example": "format: RFC3339"
+ },
+ "description": {
+ "type": "string"
+ },
+ "digest": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "name": {
+ "type": "string"
+ },
+ "os": {
+ "$ref": "#/definitions/pkg_types.OSInfo"
+ },
+ "private": {
+ "type": "boolean"
+ },
+ "repo_id": {
+ "type": "integer"
+ },
+ "size": {
+ "type": "integer"
+ },
+ "snapshot": {
+ "type": "string"
+ },
+ "tag": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string",
+ "example": "format: RFC3339"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "types.JSONResult": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "integer"
+ },
+ "data": {},
+ "msg": {
+ "type": "string"
+ }
+ }
+ },
+ "types.LoginRequest": {
+ "type": "object",
+ "properties": {
+ "password": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "types.PrivateTokenDeleteRequest": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1
+ }
+ }
+ },
+ "types.PrivateTokenRequest": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "expiredAt": {
+ "type": "string",
+ "example": "RFC3339: 2023-11-30T14:30:00.123+08:00"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1,
+ "example": "my-token"
+ }
+ }
+ },
+ "types.RefreshRequest": {
+ "type": "object",
+ "required": [
+ "accessToken",
+ "refreshToken"
+ ],
+ "properties": {
+ "accessToken": {
+ "type": "string"
+ },
+ "refreshToken": {
+ "type": "string"
+ }
+ }
+ },
+ "types.ResetUserPwdRequest": {
+ "type": "object",
+ "required": [
+ "code",
+ "password",
+ "password1",
+ "phone",
+ "smsId"
+ ],
+ "properties": {
+ "code": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 3
+ },
+ "password1": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 3
+ },
+ "phone": {
+ "type": "string"
+ },
+ "smsId": {
+ "type": "string"
+ }
+ }
+ },
+ "types.TokenResponse": {
+ "type": "object",
+ "properties": {
+ "accessToken": {
+ "type": "string"
+ },
+ "refreshToken": {
+ "type": "string"
+ }
+ }
+ },
+ "types.UpdateUserRequest": {
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "nickname": {
+ "type": "string"
+ }
+ }
+ },
+ "types.UserInfoResp": {
+ "type": "object",
+ "required": [
+ "email",
+ "nickname",
+ "username"
+ ],
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "isAdmin": {
+ "type": "boolean"
+ },
+ "nickname": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1
+ },
+ "type": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string",
+ "maxLength": 20,
+ "minLength": 1
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmd/vmihub/docs/swagger.yaml b/cmd/vmihub/docs/swagger.yaml
new file mode 100644
index 0000000..ad5e2be
--- /dev/null
+++ b/cmd/vmihub/docs/swagger.yaml
@@ -0,0 +1,982 @@
+basePath: /api/v1
+definitions:
+ models.PrivateToken:
+ properties:
+ createdAt:
+ type: string
+ expiredAt:
+ type: string
+ id:
+ type: integer
+ lastUsed:
+ type: string
+ name:
+ type: string
+ token:
+ type: string
+ userId:
+ type: integer
+ type: object
+ pkg_types.OSInfo:
+ properties:
+ arch:
+ default: amd64
+ type: string
+ distrib:
+ default: ubuntu
+ type: string
+ type:
+ default: linux
+ type: string
+ version:
+ type: string
+ type: object
+ types.ChangeUserPwdRequest:
+ properties:
+ newPassword:
+ maxLength: 20
+ minLength: 3
+ type: string
+ required:
+ - newPassword
+ type: object
+ types.ImageCreateRequest:
+ properties:
+ description:
+ type: string
+ digest:
+ type: string
+ format:
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ os:
+ $ref: '#/definitions/pkg_types.OSInfo'
+ private:
+ default: false
+ type: boolean
+ region_code:
+ default: ap-yichang-1
+ type: string
+ size:
+ type: integer
+ tag:
+ default: latest
+ type: string
+ url:
+ type: string
+ username:
+ type: string
+ type: object
+ types.ImageInfoResp:
+ properties:
+ createdAt:
+ example: 'format: RFC3339'
+ type: string
+ description:
+ type: string
+ digest:
+ type: string
+ format:
+ type: string
+ id:
+ type: integer
+ name:
+ type: string
+ os:
+ $ref: '#/definitions/pkg_types.OSInfo'
+ private:
+ type: boolean
+ repo_id:
+ type: integer
+ size:
+ type: integer
+ snapshot:
+ type: string
+ tag:
+ type: string
+ updatedAt:
+ example: 'format: RFC3339'
+ type: string
+ username:
+ type: string
+ type: object
+ types.JSONResult:
+ properties:
+ code:
+ type: integer
+ data: {}
+ msg:
+ type: string
+ type: object
+ types.LoginRequest:
+ properties:
+ password:
+ type: string
+ username:
+ type: string
+ type: object
+ types.PrivateTokenDeleteRequest:
+ properties:
+ name:
+ maxLength: 20
+ minLength: 1
+ type: string
+ required:
+ - name
+ type: object
+ types.PrivateTokenRequest:
+ properties:
+ expiredAt:
+ example: 'RFC3339: 2023-11-30T14:30:00.123+08:00'
+ type: string
+ name:
+ example: my-token
+ maxLength: 20
+ minLength: 1
+ type: string
+ required:
+ - name
+ type: object
+ types.RefreshRequest:
+ properties:
+ accessToken:
+ type: string
+ refreshToken:
+ type: string
+ required:
+ - accessToken
+ - refreshToken
+ type: object
+ types.ResetUserPwdRequest:
+ properties:
+ code:
+ type: string
+ password:
+ maxLength: 20
+ minLength: 3
+ type: string
+ password1:
+ maxLength: 20
+ minLength: 3
+ type: string
+ phone:
+ type: string
+ smsId:
+ type: string
+ required:
+ - code
+ - password
+ - password1
+ - phone
+ - smsId
+ type: object
+ types.TokenResponse:
+ properties:
+ accessToken:
+ type: string
+ refreshToken:
+ type: string
+ type: object
+ types.UpdateUserRequest:
+ properties:
+ email:
+ type: string
+ nickname:
+ type: string
+ type: object
+ types.UserInfoResp:
+ properties:
+ email:
+ type: string
+ id:
+ type: integer
+ isAdmin:
+ type: boolean
+ nickname:
+ maxLength: 20
+ minLength: 1
+ type: string
+ type:
+ type: string
+ username:
+ maxLength: 20
+ minLength: 1
+ type: string
+ required:
+ - email
+ - nickname
+ - username
+ type: object
+info:
+ contact: {}
+ description: this is vmihub server.
+ title: vmihub project
+ version: "1.0"
+paths:
+ /image/:username/:name/startChunkUpload:
+ post:
+ consumes:
+ - application/json
+ description: UploadImageChunk upload image chunk
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: username
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: image name
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: force upload (overwrite)
+ in: query
+ name: force
+ type: boolean
+ - description: chunk size
+ in: query
+ name: chunkSize
+ required: true
+ type: integer
+ - description: number of chunks
+ in: query
+ name: nChunks
+ required: true
+ type: integer
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: start chunked image upload
+ tags:
+ - Image Management
+ /image/{username}/{name}/startUpload:
+ post:
+ consumes:
+ - application/json
+ description: StartUpload upload image file
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: username
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: image name
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: force upload (overwrite)
+ in: query
+ name: force
+ type: boolean
+ - description: image configuration
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.ImageCreateRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: upload image file
+ tags:
+ - Image Management
+ /image/{username}/{name}:
+ delete:
+ consumes:
+ - application/json
+ description: DeleteImage delete image
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: repository owner
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: repository name
+ in: path
+ name: name
+ required: true
+ type: string
+ - default: '"latest"'
+ description: image tag
+ in: query
+ name: tag
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: delete image
+ tags:
+ - Image Management
+ /image/{username}/{name}/chunk/{chunkIdx}/download:
+ get:
+ consumes:
+ - application/json
+ description: DownloadImageChunk download image chunk
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: repository owner
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: repository name
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: chunk index
+ in: path
+ name: chunkIdx
+ required: true
+ type: integer
+ - default: '"latest"'
+ description: tag
+ in: query
+ name: tag
+ type: string
+ - default: '"50M"'
+ description: chunk size
+ in: query
+ name: chunkSize
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: download image chunk
+ tags:
+ - Image Management
+ /image/{username}/{name}/download:
+ get:
+ consumes:
+ - application/json
+ description: DownloadImage download image
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: repository owner
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: repository name
+ in: path
+ name: name
+ required: true
+ type: string
+ - default: '"latest"'
+ description: image tag
+ in: query
+ name: tag
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: download image
+ tags:
+ - Image Management
+ /image/{username}/{name}/info:
+ get:
+ consumes:
+ - application/json
+ description: GetImageInfo get image meta info
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: repository owner
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: repository name
+ in: path
+ name: name
+ required: true
+ type: string
+ - default: '"latest"'
+ description: image tag
+ in: query
+ name: tag
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: get image meta info
+ tags:
+ - Image Management
+ /image/{username}/{name}/upload:
+ post:
+ consumes:
+ - application/json
+ description: UploadImage upload image
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: repository owner
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: repository name
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: force upload (overwrite)
+ in: query
+ name: force
+ type: boolean
+ - description: file
+ in: formData
+ name: file
+ required: true
+ type: file
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: upload image
+ tags:
+ - Image Management
+ /image/chunk/{chunkIdx}/upload:
+ post:
+ consumes:
+ - application/json
+ description: UploadImageChunk upload image chunk
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: chunk index
+ in: path
+ name: chunkIdx
+ required: true
+ type: string
+ - description: upload ID
+ in: query
+ name: uploadID
+ required: true
+ type: string
+ - description: file
+ in: formData
+ name: file
+ required: true
+ type: file
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: upload image chunk
+ tags:
+ - Image Management
+ /image/chunk/merge:
+ post:
+ consumes:
+ - application/json
+ description: MergeChunk merge chunk slice file
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: upload ID
+ in: query
+ name: uploadID
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: merge chunk slice file
+ tags:
+ - Image Management
+ /images:
+ get:
+ consumes:
+ - application/json
+ description: ListImages get images list
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - default: ""
+ description: search keyword
+ in: query
+ name: keyword
+ type: string
+ - description: username
+ in: query
+ name: username
+ type: string
+ - default: 1
+ description: page number
+ in: query
+ name: page
+ type: integer
+ - default: 10
+ description: page size
+ in: query
+ name: pageSize
+ type: integer
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ items:
+ $ref: '#/definitions/types.ImageInfoResp'
+ type: array
+ type: object
+ summary: get image list
+ tags:
+ - Image Management
+ /repositories:
+ get:
+ consumes:
+ - application/json
+ description: ListRepositories get repository list
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: username
+ in: query
+ name: username
+ type: string
+ - default: 1
+ description: page number
+ in: query
+ name: page
+ type: integer
+ - default: 10
+ description: page size
+ in: query
+ name: pageSize
+ type: integer
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: get repository list
+ tags:
+ - Image Management
+ /repository/{username}/{name}:
+ delete:
+ consumes:
+ - application/json
+ description: DeleteRepository
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: username
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: repository name
+ in: path
+ name: name
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: delete specified repository
+ tags:
+ - Image Management
+ get:
+ consumes:
+ - application/json
+ description: ListRepoImages get image list of specified repo
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: username
+ in: path
+ name: username
+ required: true
+ type: string
+ - description: repository name
+ in: path
+ name: name
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ summary: get image list of specified repository
+ tags:
+ - Image Management
+ /user/changePwd:
+ post:
+ consumes:
+ - application/json
+ description: ChangePwd changes the password of the current user
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: change password request
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.ChangeUserPwdRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ $ref: '#/definitions/types.UserInfoResp'
+ type: object
+ summary: change user password
+ tags:
+ - User Management
+ /user/info:
+ get:
+ consumes:
+ - application/json
+ description: GetUserInfo get user info
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ $ref: '#/definitions/types.UserInfoResp'
+ type: object
+ summary: get user info
+ tags:
+ - User Management
+ post:
+ consumes:
+ - application/json
+ description: UpdateUser updates user information
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: user info to update
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.UpdateUserRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ $ref: '#/definitions/types.UserInfoResp'
+ type: object
+ summary: update user information
+ tags:
+ - User Management
+ /user/login:
+ post:
+ consumes:
+ - application/json
+ description: LoginUser login user
+ parameters:
+ - description: user credentials
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.LoginRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ $ref: '#/definitions/types.JSONResult'
+ tags:
+ - User Management
+ /user/logout:
+ post:
+ consumes:
+ - application/json
+ description: LogoutUser logout user
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ $ref: '#/definitions/types.JSONResult'
+ tags:
+ - User Management
+ /user/privateToken:
+ delete:
+ consumes:
+ - application/json
+ description: DeletePrivateToken delete a private token for the current user
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: private token delete request
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.PrivateTokenDeleteRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ msg:
+ type: string
+ type: object
+ summary: delete private token
+ tags:
+ - User Management
+ post:
+ consumes:
+ - application/json
+ description: CreatePrivateToken create a private token for the current user
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: private token request
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.PrivateTokenRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ $ref: '#/definitions/models.PrivateToken'
+ type: object
+ summary: create private token
+ tags:
+ - User Management
+ /user/privateTokens:
+ get:
+ consumes:
+ - application/json
+ description: ListPrivateToken list all private tokens of current user
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ items:
+ $ref: '#/definitions/models.PrivateToken'
+ type: array
+ type: object
+ summary: list private token
+ tags:
+ - User Management
+ /user/refreshToken:
+ post:
+ consumes:
+ - application/json
+ description: RefreshToken refresh token
+ parameters:
+ - description: refresh token request
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.RefreshRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ $ref: '#/definitions/types.TokenResponse'
+ type: object
+ summary: refresh token
+ tags:
+ - User Management
+ /user/resetPwd:
+ post:
+ consumes:
+ - application/json
+ description: ResetPwd resets user password
+ parameters:
+ - description: token
+ in: header
+ name: Authorization
+ required: true
+ type: string
+ - description: reset password request
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.ResetUserPwdRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ $ref: '#/definitions/types.UserInfoResp'
+ type: object
+ summary: reset user password
+ tags:
+ - User Management
+ /user/token:
+ post:
+ consumes:
+ - application/json
+ description: GetUserToken get user token
+ parameters:
+ - description: user credentials
+ in: body
+ name: body
+ required: true
+ schema:
+ $ref: '#/definitions/types.LoginRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: desc
+ schema:
+ allOf:
+ - $ref: '#/definitions/types.JSONResult'
+ - properties:
+ data:
+ $ref: '#/definitions/types.TokenResponse'
+ type: object
+ tags:
+ - User Management
+swagger: "2.0"
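The API documented above is token-based: a client obtains an access token from POST /api/v1/user/token (or /user/login) and then passes it in the Authorization header of later calls. Below is a minimal client sketch derived only from the swagger definitions above (types.LoginRequest, types.JSONResult, types.TokenResponse); the base URL and credentials are placeholders, and whether the header needs a "Bearer " prefix is an assumption rather than something the spec states.

// client_sketch.go - illustrative only, not part of this patch.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Mirrors types.TokenResponse from the swagger definitions.
type tokenResponse struct {
	AccessToken  string `json:"accessToken"`
	RefreshToken string `json:"refreshToken"`
}

// Mirrors types.JSONResult; data is left raw so it can hold any payload.
type jsonResult struct {
	Code int             `json:"code"`
	Msg  string          `json:"msg"`
	Data json.RawMessage `json:"data"`
}

func main() {
	base := "http://127.0.0.1:8080/api/v1" // placeholder address

	// POST /user/token with a types.LoginRequest body (placeholder credentials).
	body, _ := json.Marshal(map[string]string{"username": "demo", "password": "secret"})
	resp, err := http.Post(base+"/user/token", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var res jsonResult
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		panic(err)
	}
	var tok tokenResponse
	if err := json.Unmarshal(res.Data, &tok); err != nil {
		panic(err)
	}

	// Authenticated call; the "Bearer " prefix is an assumption, the spec only names the header.
	req, _ := http.NewRequest(http.MethodGet, base+"/user/info", nil)
	req.Header.Set("Authorization", "Bearer "+tok.AccessToken)
	info, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer info.Body.Close()
	fmt.Println("user info status:", info.Status)
}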
diff --git a/cmd/vmihub/main.go b/cmd/vmihub/main.go
new file mode 100644
index 0000000..27bc906
--- /dev/null
+++ b/cmd/vmihub/main.go
@@ -0,0 +1,155 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ _ "net/http/pprof" //nolint
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "github.com/gin-gonic/gin"
+ "github.com/gin-gonic/gin/binding"
+ "github.com/go-playground/validator/v10"
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/core/types"
+ "github.com/projecteru2/vmihub/config"
+ "github.com/projecteru2/vmihub/internal/api"
+ "github.com/projecteru2/vmihub/internal/models"
+ storFact "github.com/projecteru2/vmihub/internal/storage/factory"
+ "github.com/projecteru2/vmihub/internal/utils"
+ myvalidator "github.com/projecteru2/vmihub/internal/validator"
+ "github.com/projecteru2/vmihub/internal/version"
+ zerolog "github.com/rs/zerolog/log"
+ cli "github.com/urfave/cli/v2"
+)
+
+var (
+ configPath string
+)
+
+func main() {
+ cli.VersionPrinter = func(_ *cli.Context) {
+ fmt.Print(version.String())
+ }
+
+ app := cli.NewApp()
+ app.Name = version.NAME
+ app.Usage = "Run vmihub"
+ app.Version = version.VERSION
+ app.Flags = []cli.Flag{
+ &cli.StringFlag{
+ Name: "config",
+ Value: "/etc/eru/vmihub.toml",
+ Usage: "config file path for vmihub, in toml",
+ Destination: &configPath,
+ EnvVars: []string{"ERU_vmihub_CONFIG_PATH"},
+ },
+ }
+ app.Commands = []*cli.Command{
+ {
+ Name: "server",
+ Usage: "run vmihub server",
+ Action: runServer,
+ },
+ }
+ app.Action = runServer
+ _ = app.Run(os.Args)
+}
+
+func prepare(_ context.Context, cfg *config.Config) error {
+ if err := models.Init(&cfg.Mysql, nil); err != nil {
+ return err
+ }
+ if _, err := storFact.Init(&cfg.Storage); err != nil {
+ return err
+ }
+ utils.SetupRedis(&cfg.Redis, nil)
+
+ return nil
+}
+
+// @title vmihub project
+// @version 1.0
+// @description this is vmihub server.
+// @BasePath /api/v1
+func runServer(_ *cli.Context) error {
+ cfg, err := config.Init(configPath)
+ if err != nil {
+ zerolog.Fatal().Err(err).Send()
+ }
+ // kill -9 is syscall.SIGKILL and can't be caught, so there is no need to handle it here
+ ctx, cancel := signal.NotifyContext(context.TODO(), syscall.SIGINT, syscall.SIGTERM)
+ defer cancel()
+
+ logCfg := &types.ServerLogConfig{
+ Level: cfg.Log.Level,
+ UseJSON: cfg.Log.UseJSON,
+ Filename: cfg.Log.Filename,
+ MaxSize: cfg.Log.MaxSize,
+ MaxAge: cfg.Log.MaxAge,
+ MaxBackups: cfg.Log.MaxBackups,
+ }
+ if err := log.SetupLog(ctx, logCfg, cfg.Log.SentryDSN); err != nil {
+ zerolog.Fatal().Err(err).Send()
+ }
+ defer log.SentryDefer()
+
+ if err := prepare(ctx, cfg); err != nil {
+ log.WithFunc("main").Error(ctx, err, "Can't init server")
+ return err
+ }
+
+ gin.SetMode(cfg.Server.RunMode)
+ routersInit, err := api.SetupRouter()
+ if err != nil {
+ return err
+ }
+ readTimeout := cfg.Server.ReadTimeout
+ writeTimeout := cfg.Server.WriteTimeout
+ endPoint := cfg.Server.Bind
+
+ maxHeaderBytes := 1 << 20
+
+ log.WithFunc("main").Infof(ctx, "config info %s", cfg.String())
+
+ srv := &http.Server{
+ Addr: endPoint,
+ Handler: routersInit,
+ ReadTimeout: readTimeout,
+ WriteTimeout: writeTimeout,
+ MaxHeaderBytes: maxHeaderBytes,
+ }
+ log.Infof(ctx, "start http server listening %s", cfg.Server.Bind)
+
+ if v, ok := binding.Validator.Engine().(*validator.Validate); ok {
+ _ = v.RegisterValidation("email", myvalidator.ValidateEmail)
+ }
+
+ go handleSignals(ctx, func(ctx context.Context) {
+ if err := srv.Shutdown(ctx); err != nil {
+ log.Errorf(ctx, err, "Server Shutdown:")
+ }
+ })
+
+ if err = srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+ log.Error(ctx, err, "Error when running server")
+ }
+ return nil
+}
+
+func handleSignals(ctx context.Context, shutdownFn func(context.Context)) {
+ // Wait for the interrupt signal, then gracefully shut down the server
+ // with a timeout of 10 seconds.
+ <-ctx.Done()
+
+ newCtx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
+ defer cancel()
+
+ log.Info(context.TODO(), "Shutdown Server ...")
+ shutdownFn(newCtx)
+ log.Info(context.TODO(), "Server exited")
+}
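main.go blank-imports net/http/pprof, which registers the /debug/pprof handlers on http.DefaultServeMux; the server above serves the gin router rather than the default mux, so those handlers are not exposed by this patch. The sketch below shows one way they could be reached, assuming a separate loopback listener; the port is arbitrary and nothing here is part of this change.

// pprof_sidecar.go - illustrative only.
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof handlers on DefaultServeMux
)

func main() {
	go func() {
		// Serve DefaultServeMux (and therefore the pprof handlers) on a side port.
		log.Println(http.ListenAndServe("127.0.0.1:6060", nil))
	}()
	select {} // keep the process alive; a real server would run its main router here
}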
diff --git a/config/.gitignore b/config/.gitignore
new file mode 100644
index 0000000..4af16be
--- /dev/null
+++ b/config/.gitignore
@@ -0,0 +1,2 @@
+config.dev.toml
+config.prod.toml
diff --git a/config/ceph.go b/config/ceph.go
new file mode 100644
index 0000000..5a9250e
--- /dev/null
+++ b/config/ceph.go
@@ -0,0 +1,91 @@
+package config
+
+import (
+ "bytes"
+ _ "embed"
+ "errors"
+ "html/template"
+ "os"
+ "strings"
+)
+
+var (
+ //go:embed templates/ceph.conf
+ cephConfigStr string
+ cephConfigTpl *template.Template
+ //go:embed templates/ceph.client.eru.keyring
+ cephKeyringStr string
+ cephKeyringTpl *template.Template
+ //go:embed templates/rbdmap
+ rbdMapTplStr string
+)
+
+func (cfg *Config) PrepareCephConfig() error {
+ if err := os.MkdirAll("/etc/ceph", 0755); err != nil {
+ return err
+ }
+
+ if err := cfg.writeCephConfig(); err != nil {
+ return err
+ }
+ if err := cfg.writeCephKeyring(); err != nil {
+ return err
+ }
+ return cfg.writeRBDMap()
+}
+
+func (cfg *Config) writeCephConfig() (err error) {
+ fname := "/etc/ceph/ceph.conf"
+ if _, err := os.Stat(fname); !errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ if cephConfigTpl == nil {
+ if cephConfigTpl, err = template.New("ceph_config").Parse(cephConfigStr); err != nil {
+ return
+ }
+ }
+ var monHosts []string //nolint
+ parts := strings.Split(cfg.RBD.MonHost, ",")
+ for _, p := range parts {
+ hp := strings.Split(p, ":")
+ monHosts = append(monHosts, hp[0])
+ }
+ d := map[string]any{
+ "fsid": cfg.RBD.FSID,
+ "mon_host": monHosts,
+ }
+ var buf bytes.Buffer
+ if err = cephConfigTpl.Execute(&buf, d); err != nil {
+ return err
+ }
+ return os.WriteFile(fname, buf.Bytes(), 0644) //nolint
+}
+
+func (cfg *Config) writeCephKeyring() (err error) {
+ fname := "/etc/ceph/ceph.client.eru.keyring"
+ if _, err := os.Stat(fname); !errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ if cephKeyringTpl == nil {
+ if cephKeyringTpl, err = template.New("ceph_keyring").Parse(cephKeyringStr); err != nil {
+ return
+ }
+ }
+
+ d := map[string]any{
+ "key": cfg.RBD.Key,
+ }
+ var buf bytes.Buffer
+ if err = cephKeyringTpl.Execute(&buf, d); err != nil {
+ return
+ }
+ return os.WriteFile(fname, buf.Bytes(), 0644) //nolint
+}
+
+func (cfg *Config) writeRBDMap() error {
+ fname := "/etc/ceph/rbdmap"
+ if _, err := os.Stat(fname); !errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ return os.WriteFile(fname, []byte(rbdMapTplStr), 0644) //nolint
+}
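PrepareCephConfig renders the embedded templates only when the target files under /etc/ceph are missing. The mon_host value from the config may carry ports ("host:port,host:port"); writeCephConfig keeps only the host part, and the ceph.conf template re-appends the standard v2/v1 ports. The templates are parsed with html/template, which HTML-escapes output; that is harmless here because FSIDs, monitor IPs and base64 keyring keys contain no characters affected by the escaping. A standalone sketch of the same rendering, with made-up values, for eyeballing the generated ceph.conf:

// cephconf_sketch.go - standalone sketch of the rendering done in config/ceph.go; not part of this patch.
package main

import (
	"html/template" // same package the config code uses
	"os"
	"strings"
)

const cephConfTpl = `# minimal ceph.conf
[global]
    fsid = {{ .fsid }}
    mon_host = {{range .mon_host }} [v2:{{ . }}:3300/0,v1:{{ . }}:6789/0] {{end}}
`

func main() {
	// mon_host as it might appear in vmihub.toml: comma-separated, possibly with ports.
	raw := "10.0.0.1:6789,10.0.0.2:6789,10.0.0.3:6789"
	var monHosts []string
	for _, p := range strings.Split(raw, ",") {
		monHosts = append(monHosts, strings.Split(p, ":")[0]) // keep the host part only
	}
	tpl := template.Must(template.New("ceph_config").Parse(cephConfTpl))
	_ = tpl.Execute(os.Stdout, map[string]any{
		"fsid":     "f72e4cba-2aef-11ee-91cc-ba899cefe809", // placeholder FSID
		"mon_host": monHosts,
	})
}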
diff --git a/config/config.example.toml b/config/config.example.toml
new file mode 100644
index 0000000..d8d8480
--- /dev/null
+++ b/config/config.example.toml
@@ -0,0 +1,37 @@
+[server]
+run_mode = "test" # valid values: debug, test, release.
+bind = ":8080"
+read_timeout = "60s"
+write_timeout = "60s"
+
+[log]
+level = "info"
+
+[mysql]
+dsn = "root:123456@tcp(127.0.0.1:3306)/vmihub?parseTime=true"
+max_open_connections = 100
+max_idle_connections = 10
+
+[storage]
+type = "local"
+
+[storage.local]
+base_dir = "/tmp/.image/"
+
+[storage.s3]
+endpoint = "http://127.0.0.1/"
+access_key = "abcd"
+secret_key = "abcd"
+bucket = "eru-images"
+base_dir = "/tmp/.image/"
+
+[jwt]
+key = "7$!UEmVB#nKB@Iwab#SH!zofbEOGLRtE"
+
+[redis]
+addr = "127.0.0.1:6379"
+sentinel_addrs = []
+master_name = "mymaster"
+username = ""
+password = ""
+expire = 604800
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 0000000..9a85c95
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,188 @@
+package config
+
+import (
+ "encoding/json"
+ "errors"
+ "os"
+ "time"
+
+ _ "embed"
+
+ "github.com/mcuadros/go-defaults"
+
+ "github.com/pelletier/go-toml"
+)
+
+var (
+ cfg *Config
+)
+
+type Config struct {
+ GlobalTimeout time.Duration `toml:"global_timeout" default:"5m"`
+ MaxConcurrency int `toml:"max_concurrency" default:"10000"`
+ Server ServerConfig `toml:"server"`
+ RBD RBDConfig `toml:"rbd"`
+ Log LogConfig `toml:"log"`
+ Redis RedisConfig `toml:"redis"`
+ Mysql MysqlConfig `toml:"mysql"`
+ Storage StorageConfig `toml:"storage"`
+ JWT JWTConfig `toml:"jwt"`
+}
+
+type ServerConfig struct {
+ RunMode string `toml:"run_mode" default:"release"`
+ Bind string `toml:"bind" default:":8080"`
+ ReadTimeout time.Duration `toml:"read_timeout" default:"5m"`
+ WriteTimeout time.Duration `toml:"write_timeout" default:"5m"`
+}
+
+type RedisConfig struct {
+ Addr string `toml:"addr"`
+ SentinelAddrs []string `toml:"sentinel_addrs"`
+ MasterName string `toml:"master_name"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+ DB int `toml:"db"`
+ Expire uint `toml:"expire"`
+}
+
+func (src *RedisConfig) CopyToIfEmpty(dest *RedisConfig) {
+ if dest.Addr == "" {
+ dest.Addr = src.Addr
+ }
+ if dest.SentinelAddrs == nil {
+ dest.SentinelAddrs = src.SentinelAddrs
+ }
+ if dest.MasterName == "" {
+ dest.MasterName = src.MasterName
+ }
+ if dest.Username == "" {
+ dest.Username = src.Username
+ }
+ if dest.Password == "" {
+ dest.Password = src.Password
+ }
+ if dest.DB == 0 {
+ dest.DB = src.DB
+ }
+ if dest.Expire == 0 {
+ dest.Expire = src.Expire
+ }
+}
+
+type BackoffConfig struct {
+ InitialInterval time.Duration `toml:"initial_interval" default:"30s"`
+ MaxInterval time.Duration `toml:"max_interval" default:"60m"`
+ MaxElapsedTime time.Duration `toml:"max_elapsed_time" default:"2h"`
+}
+type MysqlConfig struct {
+ DSN string `toml:"dsn"`
+ MaxOpenConns int `toml:"max_open_connections"`
+ MaxIdleConns int `toml:"max_idle_connections"`
+}
+
+type StorageConfig struct {
+ Type string `toml:"type"`
+ Local *LocalStorageConfig `toml:"local"`
+ S3 *S3Config `toml:"s3"`
+}
+
+type RBDConfig struct {
+ Username string `toml:"username" json:"username"`
+ Pool string `toml:"pool" json:"pool"`
+ QosBPS int64 `toml:"qos_bps" json:"qosBps"`
+ QosIOPS int64 `toml:"qos_iops" json:"qosIops"`
+ FSID string `toml:"fsid" json:"fsid"`
+ Key string `toml:"key" json:"key"`
+ MonHost string `toml:"mon_host" json:"monHost"`
+}
+
+type LocalStorageConfig struct {
+ BaseDir string `toml:"base_dir"`
+}
+
+type S3Config struct {
+ Endpoint string `toml:"endpoint"`
+ AccessKey string `toml:"access_key"`
+ SecretKey string `toml:"secret_key"`
+ Bucket string `toml:"bucket"`
+ BaseDir string `toml:"base_dir"`
+}
+
+type LogConfig struct {
+ Level string `toml:"level" default:"info"`
+ UseJSON bool `toml:"use_json"`
+ SentryDSN string `toml:"sentry_dsn"`
+ // for file log
+ Filename string `toml:"filename"`
+ MaxSize int `toml:"maxsize" default:"500"`
+ MaxAge int `toml:"max_age" default:"28"`
+ MaxBackups int `toml:"max_backups" default:"3"`
+}
+
+// JWTConfig JWT signingKey info
+type JWTConfig struct {
+ SigningKey string `toml:"key"`
+}
+
+func (c *Config) String() string {
+ bs, _ := json.MarshalIndent(c, "", " ")
+ return string(bs)
+}
+
+func loadConfigFromBytes(cfgBytes []byte) (*Config, error) {
+ cfg := new(Config)
+ defaults.SetDefaults(cfg)
+ err := toml.Unmarshal(cfgBytes, cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ err = checkConfig(cfg)
+ return cfg, err
+}
+
+func Init(p string) (*Config, error) {
+ cfgBytes, err := os.ReadFile(p)
+ if err != nil {
+ return nil, err
+ }
+ if cfg, err = loadConfigFromBytes(cfgBytes); err != nil {
+ return nil, err
+ }
+ if err := cfg.PrepareCephConfig(); err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}
+
+func GetCfg() *Config {
+ return cfg
+}
+
+func checkConfig(cfg *Config) error {
+ // check run mode
+ values := map[string]bool{
+ "debug": true,
+ "test": true,
+ "release": true,
+ }
+ _, ok := values[cfg.Server.RunMode]
+ if !ok {
+ return errors.New("invalid value for run mode, only debug, test and release are allowed")
+ }
+ // check log config
+ return nil
+}
+
+var (
+ //go:embed config.example.toml
+ testConfigStr string
+)
+
+func LoadTestConfig() (*Config, error) {
+ var err error
+ if cfg, err = loadConfigFromBytes([]byte(testConfigStr)); err != nil {
+ return nil, err
+ }
+ cfg.Log.Level = "debug"
+ return cfg, nil
+}
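config.Init reads the TOML file, fills in the struct-tag defaults first (via go-defaults), lets the file override them, validates run_mode and finally writes the Ceph helper files. The defaults-then-TOML layering is the part worth calling out; below is a self-contained sketch of the same order using a throwaway struct instead of the real Config (duration parsing of the default tags is relied on here exactly as config_test.go exercises it).

// defaults_sketch.go - illustrative only, not part of this patch.
package main

import (
	"fmt"
	"time"

	"github.com/mcuadros/go-defaults"
	"github.com/pelletier/go-toml"
)

type serverCfg struct {
	RunMode     string        `toml:"run_mode" default:"release"`
	Bind        string        `toml:"bind" default:":8080"`
	ReadTimeout time.Duration `toml:"read_timeout" default:"5m"`
}

func main() {
	cfg := new(serverCfg)
	defaults.SetDefaults(cfg) // 1. fill in the `default` struct tags
	// 2. anything present in the TOML overrides the defaults
	if err := toml.Unmarshal([]byte(`bind = ":9090"`), cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.RunMode, cfg.Bind, cfg.ReadTimeout) // release :9090 5m0s
}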
diff --git a/config/config_test.go b/config/config_test.go
new file mode 100644
index 0000000..cbe5425
--- /dev/null
+++ b/config/config_test.go
@@ -0,0 +1,44 @@
+package config
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLoadConfig(t *testing.T) {
+ cfgStr := `
+ [server]
+ run_mode = "debug"
+ bind = ":5000"
+ read_timeout = "60s"
+ write_timeout = "5m"
+
+ [log]
+ level = "debug"
+ `
+ cfg, err := loadConfigFromBytes([]byte(cfgStr))
+ assert.Nil(t, err)
+ assert.Equal(t, cfg.Server, ServerConfig{
+ RunMode: "debug",
+ Bind: ":5000",
+ ReadTimeout: 60 * time.Second,
+ WriteTimeout: 5 * time.Minute,
+ })
+
+ _, err = LoadTestConfig()
+ assert.Nil(t, err)
+}
+
+func TestLoadDefaultConfig(t *testing.T) {
+ cfgStr := `
+ [server]
+ [log]
+ `
+ cfg, err := loadConfigFromBytes([]byte(cfgStr))
+ assert.Nil(t, err)
+ assert.Equal(t, cfg.GlobalTimeout, 5*time.Minute)
+ assert.Equal(t, cfg.Server.RunMode, "release")
+ assert.Equal(t, cfg.MaxConcurrency, 10000)
+}
diff --git a/config/templates/ceph.client.eru.keyring b/config/templates/ceph.client.eru.keyring
new file mode 100644
index 0000000..5bd99e2
--- /dev/null
+++ b/config/templates/ceph.client.eru.keyring
@@ -0,0 +1,2 @@
+[client.eru]
+ key = {{ .key }}
diff --git a/config/templates/ceph.conf b/config/templates/ceph.conf
new file mode 100644
index 0000000..f40e2f8
--- /dev/null
+++ b/config/templates/ceph.conf
@@ -0,0 +1,4 @@
+# minimal ceph.conf for f72e4cba-2aef-11ee-91cc-ba899cefe809
+[global]
+ fsid = {{ .fsid }}
+ mon_host = {{range .mon_host }} [v2:{{ . }}:3300/0,v1:{{ . }}:6789/0] {{end}}
diff --git a/config/templates/rbdmap b/config/templates/rbdmap
new file mode 100644
index 0000000..90f235b
--- /dev/null
+++ b/config/templates/rbdmap
@@ -0,0 +1,2 @@
+# RbdDevice Parameters
+# poolname/imagename id=client,keyring=/etc/ceph/ceph.client.keyring
diff --git a/e2e/.gitignore b/e2e/.gitignore
new file mode 100644
index 0000000..ab8b69c
--- /dev/null
+++ b/e2e/.gitignore
@@ -0,0 +1 @@
+config.toml
\ No newline at end of file
diff --git a/e2e/config.toml.example b/e2e/config.toml.example
new file mode 100644
index 0000000..c08125c
--- /dev/null
+++ b/e2e/config.toml.example
@@ -0,0 +1,4 @@
+username = "xxx"
+password = "xxx"
+url = "http://10.200.0.185:8080"
+base_dir = "/tmp/e2e-image"
\ No newline at end of file
diff --git a/e2e/image/image_test.go b/e2e/image/image_test.go
new file mode 100644
index 0000000..be3065c
--- /dev/null
+++ b/e2e/image/image_test.go
@@ -0,0 +1,85 @@
+package image
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ libimage "github.com/projecteru2/vmihub/client/image"
+ libtypes "github.com/projecteru2/vmihub/client/types"
+ "github.com/projecteru2/vmihub/client/util"
+ e2etypes "github.com/projecteru2/vmihub/e2e/types"
+ "github.com/projecteru2/vmihub/pkg/types"
+)
+
+var (
+ configFile string
+)
+
+func init() {
+ flag.StringVar(&configFile, "config", "./config.toml", "config file")
+}
+
+var _ = BeforeSuite(func() {
+})
+
+var _ = AfterSuite(func() {
+})
+
+func TestImage(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Image Suite")
+}
+
+func newImageAPI(chunkSize int64, threshold int64) (libimage.API, string) {
+ cfg, err := e2etypes.LoadConfig(configFile)
+ Expect(err).To(BeNil())
+ cred := &libtypes.Credential{
+ Username: cfg.Username,
+ Password: cfg.Password,
+ }
+ err = util.EnsureDir(cfg.BaseDir)
+ Expect(err).To(BeNil())
+ baseDir, err := os.MkdirTemp(cfg.BaseDir, "e2e-test")
+ Expect(err).To(BeNil())
+
+ var opts []libimage.Option
+ if chunkSize > 0 {
+ opts = append(opts, libimage.WithChunSize(strconv.FormatInt(chunkSize, 10)))
+ }
+ if threshold > 0 {
+ opts = append(opts, libimage.WithChunkThreshold(strconv.FormatInt(threshold, 10)))
+ }
+ imageAPI, err := libimage.NewAPI(cfg.URL, baseDir, cred, opts...)
+ Expect(err).To(BeNil())
+ return imageAPI, baseDir
+}
+
+func createImage(ctx context.Context, imageAPI libimage.API, user, name, tag string, contentByte byte, sz int64) *libtypes.Image {
+ fullname := fmt.Sprintf("%s/%s:%s", user, name, tag)
+ testImg, err := imageAPI.NewImage(fullname)
+ Expect(err).To(BeNil())
+
+ testImg.Format = "qcow2"
+ testImg.OS = types.OSInfo{
+ Type: "linux",
+ Distrib: "ubuntu",
+ Version: "20.04",
+ Arch: "amd64",
+ }
+ fname, err := prepareFile(sz, contentByte)
+ Expect(err).To(BeNil())
+ defer os.Remove(fname)
+
+ err = testImg.CopyFrom(fname)
+ Expect(err).To(BeNil())
+
+ err = imageAPI.Push(ctx, testImg, false)
+ Expect(err).To(BeNil())
+ return testImg
+}
diff --git a/e2e/image/pull_test.go b/e2e/image/pull_test.go
new file mode 100644
index 0000000..214b8be
--- /dev/null
+++ b/e2e/image/pull_test.go
@@ -0,0 +1,178 @@
+package image
+
+import (
+ "context"
+ "os"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ libimage "github.com/projecteru2/vmihub/client/image"
+ e2etypes "github.com/projecteru2/vmihub/e2e/types"
+ utils "github.com/projecteru2/vmihub/internal/utils"
+)
+
+var _ = Describe("Pull image", func() {
+ Describe("With single file", func() {
+ It("Successfully", func() {
+ imageAPI, baseDir := newImageAPI(0, 0)
+ defer os.RemoveAll(baseDir)
+
+ cfg, err := e2etypes.LoadConfig(configFile)
+ Expect(err).To(BeNil())
+ testCases := []struct {
+ user string
+ name string
+ tag string
+ size int64
+ }{
+ {
+ user: cfg.Username,
+ name: "pull-test-image",
+ tag: "test-tag",
+ size: 1024 * 1024,
+ },
+ {
+ user: cfg.Username,
+ name: "pull-test-image2",
+ tag: "latest",
+ size: 1024 * 1024,
+ },
+ }
+ for _, tc := range testCases {
+ ctx := context.TODO()
+ testImg := createImage(ctx, imageAPI, tc.user, tc.name, tc.tag, 'a', tc.size)
+ defer func() {
+ err = imageAPI.RemoveImage(ctx, testImg)
+ Expect(err).To(BeNil())
+ }()
+ err = imageAPI.RemoveLocalImage(ctx, testImg)
+ Expect(err).To(BeNil())
+ cached, err := testImg.Cached()
+ Expect(err).To(BeNil())
+ Expect(cached).To(BeFalse())
+
+ newImg, err := imageAPI.Pull(ctx, testImg.Fullname(), libimage.PullPolicyAlways)
+ Expect(err).To(BeNil())
+ Expect(newImg.Username).To(Equal(testImg.Username))
+ Expect(newImg.Name).To(Equal(testImg.Name))
+ Expect(newImg.Tag).To(Equal(utils.NormalizeTag(tc.tag, newImg.Digest)))
+
+ cached1, err := testImg.Cached()
+ Expect(err).To(BeNil())
+
+ cached2, err := newImg.Cached()
+ Expect(err).To(BeNil())
+ if tc.tag == "" || tc.tag == "latest" {
+ Expect(cached1).To(BeFalse())
+ Expect(cached2).To(BeTrue())
+ } else {
+ Expect(cached1).To(BeTrue())
+ Expect(cached2).To(BeTrue())
+ }
+ }
+ })
+ It("latest successfully", func() {
+ imageAPI, baseDir := newImageAPI(0, 0)
+ defer os.RemoveAll(baseDir)
+
+ cfg, err := e2etypes.LoadConfig(configFile)
+ Expect(err).To(BeNil())
+ ctx := context.TODO()
+ name := "pull-test-latest-tag"
+ for idx := 0; idx < 3; idx++ {
+ testImg := createImage(ctx, imageAPI, cfg.Username, name, "latest", byte(idx+65), 1024*1024)
+ defer func() {
+ err = imageAPI.RemoveImage(ctx, testImg)
+ Expect(err).To(BeNil())
+ }()
+ digest := testImg.Digest
+ err = imageAPI.RemoveLocalImage(ctx, testImg)
+ Expect(err).To(BeNil())
+ cached, err := testImg.Cached()
+ Expect(err).To(BeNil())
+ Expect(cached).To(BeFalse())
+
+ newImg, err := imageAPI.Pull(ctx, testImg.Fullname(), libimage.PullPolicyAlways)
+ Expect(err).To(BeNil())
+ Expect(newImg.Username).To(Equal(testImg.Username))
+ Expect(newImg.Name).To(Equal(testImg.Name))
+ Expect(newImg.Tag).To(Equal(utils.NormalizeTag("latest", newImg.Digest)))
+ Expect(newImg.Digest).To(Equal(digest))
+
+ cached, err = testImg.Cached()
+ Expect(err).To(BeNil())
+ Expect(cached).To(BeFalse())
+
+ cached, err = newImg.Cached()
+ Expect(err).To(BeNil())
+ Expect(cached).To(BeTrue())
+ }
+ })
+ It("failed", func() {
+ })
+ })
+ Describe("With chunk", func() {
+ It("Successfully", func() {
+ imageAPI, baseDir := newImageAPI(6*1024*1024, 10*1024*1024)
+ defer os.RemoveAll(baseDir)
+
+ cfg, err := e2etypes.LoadConfig(configFile)
+ Expect(err).To(BeNil())
+ testCases := []struct {
+ user string
+ name string
+ tag string
+ size int64
+ }{
+ {
+ user: cfg.Username,
+ name: "pull-test-image-chunk",
+ tag: "test-tag2",
+ size: 13 * 1024 * 1024,
+ },
+ {
+ user: cfg.Username,
+ name: "pull-test-image-chunk2",
+ tag: "latest",
+ size: 13 * 1024 * 1024,
+ },
+ }
+ for _, tc := range testCases {
+ ctx := context.TODO()
+ testImg := createImage(ctx, imageAPI, tc.user, tc.name, tc.tag, 'a', tc.size)
+ defer func() {
+ err = imageAPI.RemoveImage(ctx, testImg)
+ Expect(err).To(BeNil())
+ }()
+ err = imageAPI.RemoveLocalImage(ctx, testImg)
+ Expect(err).To(BeNil())
+ cached, err := testImg.Cached()
+ Expect(err).To(BeNil())
+ Expect(cached).To(BeFalse())
+
+ newImg, err := imageAPI.Pull(ctx, testImg.Fullname(), libimage.PullPolicyAlways)
+ Expect(err).To(BeNil())
+ Expect(newImg.Username).To(Equal(testImg.Username))
+ Expect(newImg.Name).To(Equal(testImg.Name))
+ Expect(newImg.Tag).To(Equal(utils.NormalizeTag(tc.tag, newImg.Digest)))
+
+ cached1, err := testImg.Cached()
+ Expect(err).To(BeNil())
+
+ cached2, err := newImg.Cached()
+ Expect(err).To(BeNil())
+
+ if tc.tag == "" || tc.tag == "latest" {
+ Expect(cached1).To(BeFalse())
+ Expect(cached2).To(BeTrue())
+ } else {
+ Expect(cached1).To(BeTrue())
+ Expect(cached2).To(BeTrue())
+ }
+ }
+ })
+ It("failed", func() {
+ })
+ })
+})
diff --git a/e2e/image/push_test.go b/e2e/image/push_test.go
new file mode 100644
index 0000000..0577750
--- /dev/null
+++ b/e2e/image/push_test.go
@@ -0,0 +1,93 @@
+package image
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ e2etypes "github.com/projecteru2/vmihub/e2e/types"
+ utils "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/types"
+)
+
+var _ = Describe("Push image", func() {
+ Describe("With single file", func() {
+ It("Successfully", func() {
+ imageAPI, baseDir := newImageAPI(6*1024*1024, 10*1024*1024)
+ defer os.RemoveAll(baseDir)
+
+ cfg, err := e2etypes.LoadConfig(configFile)
+ Expect(err).To(BeNil())
+
+ testCases := []struct {
+ user string
+ name string
+ tag string
+ size int64
+ }{
+ {
+ user: cfg.Username,
+ name: "push-test-image",
+ tag: "test-tag",
+ size: 1024 * 1024,
+ },
+ {
+ user: cfg.Username,
+ name: "push-test-image2",
+ tag: "latest",
+ size: 1024 * 1024,
+ },
+ // upload with chunk
+ {
+ user: cfg.Username,
+ name: "push-test-image-chunk",
+ tag: "test-tag",
+ size: 13 * 1024 * 1024,
+ },
+ {
+ user: cfg.Username,
+ name: "push-test-image-chunk2",
+ tag: "latest",
+ size: 13 * 1024 * 1024,
+ },
+ }
+ for _, tc := range testCases {
+ ctx := context.TODO()
+ fullname := fmt.Sprintf("%s/%s:%s", tc.user, tc.name, tc.tag)
+ testImg, err := imageAPI.NewImage(fullname)
+ Expect(err).To(BeNil())
+ testImg.Format = "qcow2"
+ testImg.OS = types.OSInfo{
+ Type: "linux",
+ Distrib: "ubuntu",
+ Version: "20.04",
+ Arch: "amd64",
+ }
+
+ fname, err := prepareFile(tc.size, 'a')
+ Expect(err).To(BeNil())
+ defer os.Remove(fname)
+
+ err = testImg.CopyFrom(fname)
+ Expect(err).To(BeNil())
+
+ err = imageAPI.Push(ctx, testImg, false)
+ Expect(err).To(BeNil())
+ defer func() {
+ err = imageAPI.RemoveImage(ctx, testImg)
+ Expect(err).To(BeNil())
+ }()
+ info, err := imageAPI.GetInfo(ctx, fullname)
+ Expect(err).To(BeNil())
+ Expect(info.Digest).To(Equal(testImg.Digest))
+ Expect(info.Username).To(Equal(tc.user))
+ Expect(info.Name).To(Equal(tc.name))
+ Expect(info.Tag).To(Equal(utils.NormalizeTag(tc.tag, info.Digest)))
+ }
+ })
+ It("failed", func() {
+ })
+ })
+})
diff --git a/e2e/image/utils.go b/e2e/image/utils.go
new file mode 100644
index 0000000..c48d33a
--- /dev/null
+++ b/e2e/image/utils.go
@@ -0,0 +1,47 @@
+package image
+
+import (
+ "bytes"
+ "os"
+)
+
+func generateFile(filename string, sizeInBytes int64, b byte) error {
+ file, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ // Write bytes to the file until it reaches the specified size
+ var bytesWritten int64
+ for bytesWritten < sizeInBytes {
+ bytesToWrite := sizeInBytes - bytesWritten
+ if bytesToWrite > 1024 {
+ bytesToWrite = 1024 // Write at most 1 KB at a time
+ }
+ buf := bytes.Repeat([]byte{b}, int(bytesToWrite))
+ n, err := file.Write(buf)
+ if err != nil {
+ return err
+ }
+ bytesWritten += int64(n)
+ }
+
+ return nil
+}
+
+func generateTempFilename() string {
+ // Generate a temporary filename
+ tempFile, err := os.CreateTemp("/tmp", "example")
+ if err != nil {
+ panic(err)
+ }
+ defer os.Remove(tempFile.Name()) // Remove the temporary file immediately; only its name is needed
+ _ = tempFile.Close() // Close the handle so the descriptor doesn't leak
+ return tempFile.Name()
+}
+
+func prepareFile(sz int64, b byte) (string, error) {
+ fname := generateTempFilename()
+ err := generateFile(fname, sz, b)
+ return fname, err
+}
diff --git a/e2e/types/config.go b/e2e/types/config.go
new file mode 100644
index 0000000..59c6c4b
--- /dev/null
+++ b/e2e/types/config.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+ "errors"
+ "os"
+
+ "github.com/BurntSushi/toml"
+ "github.com/mcuadros/go-defaults"
+)
+
+type Config struct {
+ URL string `toml:"url"`
+ BaseDir string `toml:"base_dir"`
+ Username string `toml:"username"`
+ Password string `toml:"password"`
+}
+
+func LoadConfig(fname string) (*Config, error) {
+ cfgBytes, err := os.ReadFile(fname)
+ if err != nil {
+ return nil, err
+ }
+ cfg := new(Config)
+ defaults.SetDefaults(cfg)
+ err = toml.Unmarshal(cfgBytes, cfg)
+ if err != nil {
+ return nil, err
+ }
+ err = checkConfig(cfg)
+ return cfg, err
+}
+
+func checkConfig(cfg *Config) error {
+ if cfg.Username == "" {
+ return errors.New("username is required")
+ }
+ if cfg.Password == "" {
+ return errors.New("password is required")
+ }
+ if cfg.URL == "" {
+ return errors.New("url is required")
+ }
+ return nil
+}
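The e2e suites read this file through the -config flag registered in e2e/image/image_test.go (default ./config.toml; see e2e/config.toml.example). A small sketch of LoadConfig and its validation, using a throwaway file with placeholder values:

// loadconfig_sketch.go - illustrative only, not part of this patch.
package main

import (
	"fmt"
	"os"

	e2etypes "github.com/projecteru2/vmihub/e2e/types"
)

func main() {
	f, err := os.CreateTemp("", "e2e-config-*.toml")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// A complete config passes checkConfig; drop any of the three required
	// fields (username, password, url) and LoadConfig returns an error instead.
	_, _ = f.WriteString(`
username = "xxx"
password = "xxx"
url = "http://127.0.0.1:8080"
base_dir = "/tmp/e2e-image"
`)
	_ = f.Close()

	cfg, err := e2etypes.LoadConfig(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.URL, cfg.BaseDir)
}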
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..9859ee0
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,117 @@
+module github.com/projecteru2/vmihub
+
+go 1.22
+
+toolchain go1.22.3
+
+require (
+ github.com/BurntSushi/toml v1.3.2
+ github.com/DATA-DOG/go-sqlmock v1.5.0
+ github.com/alicebob/miniredis/v2 v2.32.1
+ github.com/aws/aws-sdk-go v1.51.16
+ github.com/btcsuite/btcutil v1.0.2
+ github.com/cenkalti/backoff/v4 v4.2.1
+ github.com/cockroachdb/errors v1.11.1
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible
+ github.com/duke-git/lancet v1.4.3
+ github.com/dustin/go-humanize v1.0.1
+ github.com/gin-contrib/cors v1.4.0
+ github.com/gin-contrib/i18n v1.1.1
+ github.com/gin-contrib/sessions v1.0.0
+ github.com/gin-gonic/gin v1.9.1
+ github.com/go-playground/validator/v10 v10.19.0
+ github.com/go-redsync/redsync/v4 v4.13.0
+ github.com/go-sql-driver/mysql v1.7.1
+ github.com/google/uuid v1.5.0
+ github.com/jmoiron/sqlx v1.3.5
+ github.com/johannesboyne/gofakes3 v0.0.0-20240217095638-c55a48f17be6
+ github.com/mcuadros/go-defaults v1.2.0
+ github.com/mitchellh/mapstructure v1.5.0
+ github.com/onsi/ginkgo/v2 v2.13.2
+ github.com/onsi/gomega v1.29.0
+ github.com/panjf2000/ants/v2 v2.7.3
+ github.com/pelletier/go-toml v1.9.5
+ github.com/pkg/errors v0.9.1
+ github.com/projecteru2/core v0.0.0-20240614132727-08e4fbc219d1
+ github.com/rbcervilla/redisstore/v9 v9.0.0
+ github.com/redis/go-redis/v9 v9.5.1
+ github.com/rs/zerolog v1.30.0
+ github.com/samber/lo v1.39.0
+ github.com/stretchr/testify v1.9.0
+ github.com/swaggo/files v1.0.1
+ github.com/swaggo/gin-swagger v1.6.0
+ github.com/swaggo/swag v1.16.3
+ github.com/urfave/cli/v2 v2.27.1
+ go.etcd.io/bbolt v1.3.8
+ golang.org/x/crypto v0.23.0
+ golang.org/x/text v0.15.0
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+ github.com/KyleBanks/depth v1.2.1 // indirect
+ github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect
+ github.com/alphadose/haxmap v1.3.1 // indirect
+ github.com/bytedance/sonic v1.11.3 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
+ github.com/chenzhuoyu/iasm v0.9.1 // indirect
+ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
+ github.com/cockroachdb/redact v1.1.5 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+ github.com/gabriel-vasile/mimetype v1.4.3 // indirect
+ github.com/getsentry/sentry-go v0.23.0 // indirect
+ github.com/gin-contrib/sse v0.1.0 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-playground/locales v0.14.1 // indirect
+ github.com/go-playground/universal-translator v0.18.1 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/goccy/go-json v0.10.2 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
+ github.com/gorilla/context v1.1.2 // indirect
+ github.com/gorilla/securecookie v1.1.2 // indirect
+ github.com/gorilla/sessions v1.2.2 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/leodido/go-urn v1.4.0 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/nicksnyder/go-i18n/v2 v2.4.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rogpeppe/go-internal v1.11.0 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
+ github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect
+ github.com/stretchr/objx v0.5.2 // indirect
+ github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+ github.com/ugorji/go/codec v1.2.12 // indirect
+ github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
+ github.com/yuin/gopher-lua v1.1.1 // indirect
+ golang.org/x/arch v0.7.0 // indirect
+ golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect
+ golang.org/x/net v0.25.0 // indirect
+ golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/tools v0.21.0 // indirect
+ google.golang.org/grpc v1.60.1 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..5554e6a
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,433 @@
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
+github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE=
+github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis/v2 v2.32.1 h1:Bz7CciDnYSaa0mX5xODh6GUITRSx+cVhjNoOR4JssBo=
+github.com/alicebob/miniredis/v2 v2.32.1/go.mod h1:AqkLNAfUm0K07J28hnAyyQKf/x0YkCY/g5DCtuL01Mw=
+github.com/alphadose/haxmap v1.3.1 h1:KmZh75duO1tC8pt3LmUwoTYiZ9sh4K52FX8p7/yrlqU=
+github.com/alphadose/haxmap v1.3.1/go.mod h1:rjHw1IAqbxm0S3U5tD16GoKsiAd8FWx5BJ2IYqXwgmM=
+github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.51.16 h1:vnWKK8KjbftEkuPX8bRj3WHsLy1uhotn0eXptpvrxJI=
+github.com/aws/aws-sdk-go v1.51.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
+github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
+github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
+github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
+github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
+github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
+github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
+github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
+github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
+github.com/bytedance/sonic v1.11.3 h1:jRN+yEjakWh8aK5FzrciUHG8OFXK+4/KrAX/ysEtHAA=
+github.com/bytedance/sonic v1.11.3/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0=
+github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
+github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
+github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0=
+github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
+github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
+github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
+github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/duke-git/lancet v1.4.3 h1:cWcaT1aBtEbYbYAxmQ8wP+B4K6X1NpMn6nyjD8giQA8=
+github.com/duke-git/lancet v1.4.3/go.mod h1:Grr6ehF0ig2nRIjeb+NmcxiJ12mkML4XQAx95tlQeJU=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
+github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
+github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE=
+github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
+github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
+github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
+github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
+github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
+github.com/gin-contrib/i18n v1.1.1 h1:2wwCkrU/Zsu91fwZBhpXAPTQbHnjengAs7TNLbtJmew=
+github.com/gin-contrib/i18n v1.1.1/go.mod h1:BAC1NwrM9ApRxS5zehG/OSoM3DTXqTVoJs+6VTdnIDo=
+github.com/gin-contrib/sessions v1.0.0 h1:r5GLta4Oy5xo9rAwMHx8B4wLpeRGHMdz9NafzJAdP8Y=
+github.com/gin-contrib/sessions v1.0.0/go.mod h1:DN0f4bvpqMQElDdi+gNGScrP2QEI04IErRyMFyorUOI=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
+github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
+github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
+github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
+github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
+github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI=
+github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA=
+github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
+github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
+github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/context v1.1.2 h1:WRkNAv2uoa03QNIc1A6u4O7DAGMUVoopZhkiXWA2V1o=
+github.com/gorilla/context v1.1.2/go.mod h1:KDPwT9i/MeWHiLl90fuTgrt4/wPcv75vFAZLaOOcbxM=
+github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
+github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
+github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY=
+github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/johannesboyne/gofakes3 v0.0.0-20240217095638-c55a48f17be6 h1:W8heH5NR7dfdB4FehSFI+DxjCbVKe9fPkPqKzCPJwnM=
+github.com/johannesboyne/gofakes3 v0.0.0-20240217095638-c55a48f17be6/go.mod h1:AxgWC4DDX54O2WDoQO1Ceabtn6IbktjU/7bigor+66g=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mcuadros/go-defaults v1.2.0 h1:FODb8WSf0uGaY8elWJAkoLL0Ri6AlZ1bFlenk56oZtc=
+github.com/mcuadros/go-defaults v1.2.0/go.mod h1:WEZtHEVIGYVDqkKSWBdWKUVdRyKlMfulPaGDWIVeCWY=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/nicksnyder/go-i18n/v2 v2.4.0 h1:3IcvPOAvnCKwNm0TB0dLDTuawWEj+ax/RERNC+diLMM=
+github.com/nicksnyder/go-i18n/v2 v2.4.0/go.mod h1:nxYSZE9M0bf3Y70gPQjN9ha7XNHX7gMc814+6wVyEI4=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
+github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
+github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/panjf2000/ants/v2 v2.7.3 h1:rHQ0hH0DQvuNUqqlWIMJtkMcDuL1uQAfpX2mIhQ5/s0=
+github.com/panjf2000/ants/v2 v2.7.3/go.mod h1:KIBmYG9QQX5U2qzFP/yQJaq/nSb6rahS9iEHkrCMgM8=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo=
+github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
+github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/projecteru2/core v0.0.0-20240614132727-08e4fbc219d1 h1:ckh4IsnppXEbe9vb3Au4lKO5Z7ZNqanNBLdWViBdvxI=
+github.com/projecteru2/core v0.0.0-20240614132727-08e4fbc219d1/go.mod h1:JDOLwVw4EdLTk+bqI/LdU4Ix/Wl6BaaHMzaOO5vpU8U=
+github.com/rbcervilla/redisstore/v9 v9.0.0 h1:wOPbBaydbdxzi1gTafDftCI/Z7vnsXw0QDPCuhiMG0g=
+github.com/rbcervilla/redisstore/v9 v9.0.0/go.mod h1:q/acLpoKkTZzIsBYt0R4THDnf8W/BH6GjQYvxDSSfdI=
+github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
+github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo=
+github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c=
+github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
+github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
+github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
+github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
+github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI=
+github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0=
+github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM=
+github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8=
+github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=
+github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=
+github.com/swaggo/gin-swagger v1.6.0 h1:y8sxvQ3E20/RCyrXeFfg60r6H0Z+SwpTjMYsMm+zy8M=
+github.com/swaggo/gin-swagger v1.6.0/go.mod h1:BG00cCEy294xtVpyIAHG6+e2Qzj/xKlRdOqDkvq0uzo=
+github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg=
+github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
+github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
+github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
+github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
+github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc=
+golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
+golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
+golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
+golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
+google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
+google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/internal/api/image/chunk.go b/internal/api/image/chunk.go
new file mode 100644
index 0000000..6bcfdfe
--- /dev/null
+++ b/internal/api/image/chunk.go
@@ -0,0 +1,619 @@
+package image
+
+import (
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/gin-gonic/gin"
+ "github.com/gin-gonic/gin/binding"
+ "github.com/mcuadros/go-defaults"
+
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/vmihub/internal/common"
+ "github.com/projecteru2/vmihub/internal/models"
+ storFact "github.com/projecteru2/vmihub/internal/storage/factory"
+ stotypes "github.com/projecteru2/vmihub/internal/storage/types"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+ "github.com/projecteru2/vmihub/pkg/types"
+ "github.com/redis/go-redis/v9"
+)
+
+const (
+ redisInfoKey = "/vmihub/chunk/info/%s"
+ redisSliceKey = "/vmihub/chunk/slice/%s"
+
+ redisImageHKey = "image"
+ redisForceHKey = "force"
+ redisSizeHKey = "chunkSize"
+ redisDigestHkey = "digest"
+ redisChunkNumHkey = "nChunks"
+
+ chunkRedisExpire = 60 * 60 * time.Second
+ defaultChunkSize = "50M" // 1024 * 1024 * 50
+)
+
+// DownloadImageChunk download image chunk
+//
+// @Summary download image chunk
+// @Description DownloadImageChunk download image chunk
+// @Tags Image Management
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "repository username"
+// @Param name path string true "repository name"
+// @Param chunkIdx path int true "chunk index"
+// @Param tag query string false "image tag" default("latest")
+// @Param chunkSize query string false "chunk size" default("50M")
+// @Success 200
+// @Router /image/{username}/{name}/chunk/{chunkIdx}/download [get]
+func DownloadImageChunk(c *gin.Context) {
+ username := c.Param("username")
+ name := c.Param("name")
+ chunkIdx := c.Param("chunkIdx")
+ tag := validateParamTag(c.Query("tag"))
+ chunkSizeStr := c.DefaultQuery("chunkSize", defaultChunkSize)
+ cIdx, _ := strconv.Atoi(chunkIdx) //nolint:nolintlint,errcheck
+
+ if err := validateRepoName(username, name); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid name"})
+ return
+ }
+
+ chunkSize, err := humanize.ParseBytes(chunkSizeStr)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid chunk size"})
+ return
+ }
+
+ // check upload image is exist in db
+ repo, err := getRepo(c, username, name, "read")
+ if err != nil {
+ return
+ }
+ img, err := getRepoImage(c, repo, tag)
+ if err != nil {
+ return
+ }
+
+ if img.Format == models.ImageFormatRBD {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "image created from system disk doesn't support download"})
+ return
+ }
+ sliceNum := uint64(math.Ceil(float64(img.Size) / float64(chunkSize)))
+ if uint64(cIdx) > sliceNum {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("sliceIndex: %d, out of range\n", cIdx),
+ })
+ return
+ }
+ sto := storFact.Instance()
+ offset := int64(uint64(cIdx) * chunkSize)
+ rc, err := sto.SeekRead(c, img.Fullname(), offset)
+ if err != nil {
+ log.WithFunc("DownloadImageChunk").Error(c, err, "failed to get seek reader")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again.",
+ })
+ return
+ }
+ defer rc.Close()
+
+ contentSize := chunkSize
+ if offset+int64(contentSize) > img.Size {
+ contentSize = uint64(img.Size) - uint64(offset)
+ }
+ c.Header("Content-Type", "application/octet-stream")
+ c.Header("Content-Disposition", "attachment; filename="+img.SliceName())
+ c.Header("Content-Length", fmt.Sprintf("%d", contentSize))
+
+ reader := io.LimitReader(rc, int64(contentSize))
+ // write content to response
+ _, err = io.Copy(c.Writer, reader)
+ if err != nil {
+ log.WithFunc("DownloadImageChunk").Error(c, err, "Failed to download file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "Failed to download file"})
+ return
+ }
+}
+
+// StartImageChunkUpload start chunk upload session
+//
+// @Summary start image chunk upload
+// @Description StartImageChunkUpload starts a chunked upload session
+// @Tags Image Management
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "username"
+// @Param name path string true "image name"
+// @Param force query bool false "force upload (overwrite)" default("false")
+// @Param chunkSize query int true "chunk size"
+// @Param nChunks query int true "number of chunks"
+// @Success 200
+// @Router /image/{username}/{name}/startChunkUpload [post]
+func StartImageChunkUpload(c *gin.Context) {
+ logger := log.WithFunc("StartImageChunkUpload")
+ username := c.Param("username")
+ name := c.Param("name")
+ force := utils.GetBooleanQuery(c, "force", false)
+ chunkSize := c.Query("chunkSize")
+ nChunks := c.Query("nChunks")
+ if err := validateChunkSize(c, chunkSize); err != nil {
+ return
+ }
+ if err := validateNChunks(c, nChunks); err != nil {
+ return
+ }
+ var req types.ImageCreateRequest
+ defaults.SetDefaults(&req)
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ if err := req.Check(); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ tag := utils.NormalizeTag(req.Tag, req.Digest)
+ if err := validateRepoName(username, name); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid name"})
+ return
+ }
+ repo, img, err := common.GetRepoImageForUpload(c, username, name, tag)
+ if err != nil {
+ return
+ }
+
+ // if img exists and not force update, upload failed!
+ if img != nil && !force {
+ c.AbortWithStatusJSON(http.StatusConflict, gin.H{
+ "error": "Upload failed, image already exists. You can use force upload to overwrite.",
+ })
+ return
+ }
+
+ if repo == nil {
+ repo = &models.Repository{
+ Username: username,
+ Name: name,
+ Private: req.Private,
+ }
+ }
+ if img == nil {
+ reqLabels := models.Labels{}
+ if req.Labels != nil {
+ reqLabels = models.Labels(req.Labels)
+ }
+ img = &models.Image{
+ RepoID: repo.ID,
+ Tag: tag,
+ Labels: models.NewJSONColumn(&reqLabels),
+ Size: req.Size,
+ Digest: req.Digest,
+ Format: req.Format,
+ OS: models.NewJSONColumn(&req.OS),
+ Description: req.Description,
+ Repo: repo,
+ }
+ }
+
+ rdb := utils.GetRedisConn()
+
+ sto := storFact.Instance()
+ uploadID, err := sto.CreateChunkWrite(c, img.SliceName())
+ if err != nil {
+ logger.Error(c, err, "Failed to save file to storage [CreateChunkWrite]")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+
+ // set redis expiration time
+ for _, fStr := range []string{redisInfoKey, redisSliceKey} {
+ rKey := fmt.Sprintf(fStr, uploadID)
+ err = rdb.Expire(c, rKey, chunkRedisExpire).Err()
+ if err != nil {
+ logger.Errorf(c, err, "Failed to set expiration for %s", rKey)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+ }
+ bs, err := json.Marshal(img)
+ if err != nil {
+ logger.Error(c, err, "failed marshal image tag")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ err = rdb.HSet(c, fmt.Sprintf(redisInfoKey, uploadID),
+ redisImageHKey, string(bs),
+ redisForceHKey, strconv.FormatBool(force),
+ redisSizeHKey, chunkSize,
+ redisDigestHkey, req.Digest,
+ redisChunkNumHkey, nChunks,
+ ).Err()
+ if err != nil {
+ logger.Error(c, err, "Failed to set information and slices")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "Failed to set set expiration",
+ })
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "data": map[string]any{
+ "uploadID": uploadID,
+ },
+ })
+}
+
+// UploadImageChunk upload image chunk
+//
+// @Summary upload image chunk
+// @Description UploadImageChunk upload image chunk
+// @Tags Image Management
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param chunkIdx path string true "chunk index"
+// @Param uploadID query string true "upload ID"
+// @Param file formData file true "file"
+// @Success 200
+// @Router /image/chunk/{chunkIdx}/upload [post]
+func UploadImageChunk(c *gin.Context) {
+ logger := log.WithFunc("UploadImageChunk")
+ chunkIdxStr := c.Param("chunkIdx")
+ uploadID := c.Query("uploadID")
+ digest := c.Query("digest")
+
+ if uploadID == "" {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "empty uploadID"})
+ return
+ }
+ chunkIdx, err := strconv.ParseInt(chunkIdxStr, 10, 64)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "invalid chunk index",
+ })
+ return
+ }
+ file, err := c.FormFile("file")
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("get upload file failed: %s", err),
+ })
+ return
+ }
+
+ rdb := utils.GetRedisConn()
+ rAns := rdb.HGetAll(c, fmt.Sprintf(redisInfoKey, uploadID))
+ if rAns.Err() != nil {
+ logger.Error(c, rAns.Err(), "Failed to set information and slices")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "Failed to set set expiration",
+ })
+ return
+ }
+ var (
+ img models.Image
+ chunkSize uint64
+ nChunks int
+ )
+ for k, v := range rAns.Val() {
+ switch k {
+ case redisImageHKey:
+ err = json.Unmarshal([]byte(v), &img)
+ case redisSizeHKey:
+ chunkSize, err = humanize.ParseBytes(v)
+ case redisChunkNumHkey:
+ nChunks, err = strconv.Atoi(v)
+ }
+ if err != nil {
+ logger.Errorf(c, err, "incorrect redis value: %s %s", k, v)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ }
+ if chunkIdx >= int64(nChunks) {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("Only need %d chunks, but got chunk index %d", nChunks, chunkIdx),
+ })
+ return
+ }
+ sto := storFact.Instance()
+
+ fileOpen, err := file.Open()
+ if err != nil {
+ logger.Error(c, err, "Failed to open FileHeader")
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "bad request",
+ })
+ return
+ }
+ defer fileOpen.Close()
+
+ fp, err := os.CreateTemp("/tmp", "image-chunk-upload-")
+ if err != nil {
+ logger.Error(c, err, "failed to create temp file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return
+ }
+ defer os.Remove(fp.Name())
+ defer fp.Close()
+
+ h := sha256.New()
+ reader := io.TeeReader(fileOpen, h)
+
+ nwritten, err := io.Copy(fp, reader)
+ if err != nil {
+ logger.Errorf(c, err, "failed to save upload chunk to local temporary file")
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "bad request"})
+ return
+ }
+ if _, err := fp.Seek(0, 0); err != nil {
+ logger.Error(c, err, "failed to seek file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return
+ }
+ cInfo := &stotypes.ChunkInfo{
+ Idx: int(chunkIdx),
+ Size: nwritten,
+ ChunkSize: int64(chunkSize),
+ Digest: "",
+ In: fp,
+ }
+ err = sto.ChunkWrite(c, img.SliceName(), uploadID, cInfo)
+ if err != nil {
+ logger.Error(c, err, "Failed to save file to storage [ChunkWrite]")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+
+ // compute the sha256 digest of the uploaded content
+ sum := h.Sum(nil)
+ contentDigest := fmt.Sprintf("%x", sum)
+
+ // check if the digest equals to user-passed digest
+ if digest != "" && contentDigest != digest {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("invalid digest: got: %s, user passed: %s", img.Digest, digest),
+ })
+ return
+ }
+ cInfo.Digest = contentDigest
+
+ if err := rdb.HSet(c, fmt.Sprintf(redisSliceKey, uploadID), chunkIdx, cInfo).Err(); err != nil {
+ logger.Errorf(c, err, "failed to save chunk info %d to redis", chunkIdx)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error, please try again"})
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "upload chunk successfully",
+ })
+}
+
+// MergeChunk merge chunk slice file
+//
+// @Summary merge chunk slice file
+// @Description MergeChunk merge chunk slice file
+// @Tags Image Management
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param uploadID query string true "upload ID"
+// @Success 200
+// @Router /image/chunk/merge [post]
+func MergeChunk(c *gin.Context) {
+ logger := log.WithFunc("MergeChunk")
+ uploadID := c.Query("uploadID")
+
+ if uploadID == "" {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "you must specify upload id",
+ })
+ return
+ }
+
+ rdb := utils.GetRedisConn()
+ kv, err := rdb.HGetAll(c, fmt.Sprintf(redisInfoKey, uploadID)).Result()
+ if err == redis.Nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "you should start chunk upload first",
+ })
+ return
+ }
+ if err != nil {
+ logger.Error(c, err, "Failed to get information and slices")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+
+ img := &models.Image{}
+ var (
+ // force bool
+ digest string
+ nChunks int
+ )
+ for k, v := range kv {
+ switch k {
+ case redisImageHKey:
+ err = json.Unmarshal([]byte(v), img)
+ case redisForceHKey:
+ // force, err = strconv.ParseBool(v)
+ _, err = strconv.ParseBool(v)
+ case redisDigestHkey:
+ digest = v
+ case redisChunkNumHkey:
+ nChunks, err = strconv.Atoi(v)
+ }
+ if err != nil {
+ logger.Errorf(c, err, "incorrect redis value: %s %s", k, v)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ }
+ repo := img.Repo
+
+ chunkList, err := checkChunkSlices(c, uploadID, nChunks)
+ if err != nil {
+ return
+ }
+ sto := storFact.Instance()
+
+ err = sto.CompleteChunkWrite(c, img.SliceName(), uploadID, chunkList)
+ if err != nil {
+ logger.Error(c, err, "Failed to save file to storage [CompleteChunkWrite]")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+
+ img.Size, err = sto.GetSize(c, img.SliceName())
+ if err != nil {
+ logger.Error(c, err, "failed get size of %s", img.SliceName())
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ img.Digest, err = sto.GetDigest(c, img.SliceName())
+ if err != nil {
+ logger.Error(c, err, "failed get digest of %s", img.SliceName())
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ if digest != "" && digest != img.Digest {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("invalid digest: got: %s, user passed: %s", img.Digest, digest),
+ })
+ return
+ }
+
+ if err = sto.Move(c, img.SliceName(), img.Fullname()); err != nil {
+ logger.Error(c, err, "failed move %s to %s", img.SliceName(), img.Fullname())
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+
+ tx, err := models.Instance().Beginx()
+ if err != nil {
+ logger.Error(c, err, "failed get transaction")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ if repo.ID == 0 {
+ if err = repo.Save(tx); err != nil {
+ logger.Error(c, err, "failed vsave image to db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ }
+ if err = repo.SaveImage(tx, img); err != nil {
+ logger.Error(c, err, "failed save image tag to db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ _ = tx.Commit()
+ err = rdb.Del(c, fmt.Sprintf(redisInfoKey, uploadID), fmt.Sprintf(redisSliceKey, uploadID)).Err()
+ if err != nil {
+ // just log error
+ logger.Error(c, err, "Failed to delete chunk keys in redis for %d", uploadID)
+ }
+ // if err := task.SendImageTask(img.ID, force); err != nil {
+ // logger.Warnf(c, "failed to sned image preparation task")
+ // }
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "merge success",
+ "data": "",
+ })
+
+}
+
+func checkChunkSlices(c *gin.Context, uploadID string, nChunks int) (ans []*stotypes.ChunkInfo, err error) {
+ logger := log.WithFunc("checkChunkSlice")
+ rdb := utils.GetRedisConn()
+ kv, err := rdb.HGetAll(c, fmt.Sprintf(redisSliceKey, uploadID)).Result()
+ if err == redis.Nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "you should start chunk upload first",
+ })
+ return nil, terrors.ErrPlaceholder
+ }
+ if err != nil {
+ logger.Error(c, err, "Failed to get slices from redis")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "Failed to get chunk info from redis",
+ })
+ return nil, terrors.ErrPlaceholder
+ }
+
+ if nChunks != len(kv) {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("need %d chunks, but only got %d chunks", nChunks, len(kv)),
+ })
+ return nil, terrors.ErrPlaceholder
+ }
+ intSet := map[int]struct{}{}
+ for idx := 0; idx < len(kv); idx++ {
+ intSet[idx] = struct{}{}
+ }
+ for k, v := range kv {
+ cIdx, err := strconv.Atoi(k)
+ if err != nil {
+ logger.Errorf(c, err, "invalid slice key %s", k)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error, please try again"})
+ return nil, terrors.ErrPlaceholder
+ }
+ cInfo := &stotypes.ChunkInfo{}
+ if err = json.Unmarshal([]byte(v), cInfo); err != nil {
+ logger.Errorf(c, err, "invalid slice value %s %s", k, v)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error, please try again"})
+ return nil, terrors.ErrPlaceholder
+ }
+ ans = append(ans, cInfo)
+
+ if _, ok := intSet[cIdx]; !ok {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("miss chunks, current chunks %v", kv),
+ })
+ return nil, terrors.ErrPlaceholder
+ }
+ delete(intSet, cIdx)
+ }
+ if len(intSet) != 0 {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("miss chunks, current chunks %v", kv),
+ })
+ return nil, terrors.ErrPlaceholder
+ }
+ return ans, nil
+}
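The handlers above expect the client to drive a three-step flow: start a session (which returns an uploadID), upload each chunk as multipart form data, then ask the server to merge. Below is a minimal client-side sketch of that flow, assuming the /api/v1 route prefix used in the tests; the base URL, token, file name and uploadID are placeholders for illustration, not part of this change.

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "mime/multipart"
        "net/http"
        "os"
    )

    // chunkSize used when splitting the file; it must match the chunkSize
    // passed to startChunkUpload so the server can compute offsets.
    const chunkSize = 50 * 1000 * 1000

    // uploadChunk posts one chunk as multipart form data, matching UploadImageChunk's
    // expectation of a "file" form field and an uploadID query parameter.
    func uploadChunk(baseURL, token, uploadID string, idx int, data []byte) error {
        body := &bytes.Buffer{}
        mw := multipart.NewWriter(body)
        part, err := mw.CreateFormFile("file", fmt.Sprintf("chunk-%d", idx))
        if err != nil {
            return err
        }
        if _, err := part.Write(data); err != nil {
            return err
        }
        mw.Close()

        url := fmt.Sprintf("%s/api/v1/image/chunk/%d/upload?uploadID=%s", baseURL, idx, uploadID)
        req, err := http.NewRequest(http.MethodPost, url, body)
        if err != nil {
            return err
        }
        req.Header.Set("Content-Type", mw.FormDataContentType())
        req.Header.Set("Authorization", token)
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            msg, _ := io.ReadAll(resp.Body)
            return fmt.Errorf("chunk %d upload failed: %s", idx, string(msg))
        }
        return nil
    }

    func main() {
        // Placeholder values for illustration only.
        baseURL, token := "http://localhost:8080", "Bearer <token>"
        data, err := os.ReadFile("image.qcow2")
        if err != nil {
            panic(err)
        }
        // uploadID comes from POST /api/v1/image/{username}/{name}/startChunkUpload.
        uploadID := "<uploadID returned by startChunkUpload>"
        for idx := 0; idx*chunkSize < len(data); idx++ {
            end := (idx + 1) * chunkSize
            if end > len(data) {
                end = len(data)
            }
            if err := uploadChunk(baseURL, token, uploadID, idx, data[idx*chunkSize:end]); err != nil {
                panic(err)
            }
        }
        // Finally: POST /api/v1/image/chunk/merge?uploadID=... assembles the chunks.
    }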
diff --git a/internal/api/image/chunk_test.go b/internal/api/image/chunk_test.go
new file mode 100644
index 0000000..44567e4
--- /dev/null
+++ b/internal/api/image/chunk_test.go
@@ -0,0 +1,329 @@
+package image
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "mime/multipart"
+ "net/http"
+ "net/http/httptest"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/internal/testutils"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/types"
+ pkgutils "github.com/projecteru2/vmihub/pkg/utils"
+ "github.com/stretchr/testify/mock"
+)
+
+func (suite *imageTestSuite) TestDownloadImageChunk() {
+ {
+ utils.MockRedis.FlushAll()
+ // an anonymous user can't download a private image
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/chunk/0/download?chunkSize=2", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusForbidden, w.Code)
+ // suite.Equal("ok", w.Body.String())
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // a user can't read a private image that belongs to another user
+ user, pass := "user2", "pass2"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/chunk/0/download?chunkSize=2", nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusForbidden, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ chunkIdx := 1
+ chunkSize := 2
+ // normal case
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+
+ wantRows = sqlmock.NewRows([]string{"id", "repo_id", "tag", "size", "os"}).
+ AddRow(2, 1, "tag1", len(testContent), []byte("{}"))
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? AND tag = ?", imgColumns, imgTableName)).
+ WithArgs(1, "tag1").
+ WillReturnRows(wantRows)
+ sto := testutils.GetMockStorage()
+ defer sto.AssertExpectations(suite.T())
+
+ offset := chunkIdx * chunkSize
+ sto.On("SeekRead", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+ Return(io.NopCloser(bytes.NewBufferString(testContent[offset:])), nil).Once()
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", fmt.Sprintf("/api/v1/image/user1/name1/chunk/%d/download?chunkSize=%d&tag=tag1", chunkIdx, chunkSize), nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ suite.Equal(testContent[offset:offset+chunkSize], w.Body.String())
+ }
+}
+
+func (suite *imageTestSuite) TestStartChunkUpload() {
+ digest, err := pkgutils.CalcDigestOfStr(testContent)
+ suite.Nil(err)
+ url := "/api/v1/image/user1/name1/startChunkUpload?chunkSize=2&nChunks=2"
+
+ body := types.ImageCreateRequest{
+ Username: "user1",
+ Name: "name1",
+ Tag: "tag1",
+ Size: int64(len(testContent)),
+ Digest: digest,
+ Format: "qcow2",
+ OS: types.OSInfo{
+ Arch: "arm64",
+ Type: "linux",
+ Distrib: "ubuntu",
+ Version: "22.04",
+ },
+ }
+ bs, _ := json.Marshal(body)
+ {
+ // invalid arguments
+ utils.MockRedis.FlushAll()
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", url, nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusBadRequest, w.Code)
+ // suite.Equal("ok", w.Body.String())
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // an anonymous user can't upload an image
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", url, bytes.NewReader(bs))
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusUnauthorized, w.Code)
+ // suite.Equal("ok", w.Body.String())
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // a user can't write an image that belongs to another user
+ user, pass := "user2", "pass2"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", url, bytes.NewReader(bs))
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusForbidden, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // conflict cases
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+
+ wantRows = sqlmock.NewRows([]string{"id", "repo_id", "tag"}).
+ AddRow(2, 1, "tag1")
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? AND tag = ?", imgColumns, imgTableName)).
+ WithArgs(1, "tag1").
+ WillReturnRows(wantRows)
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", url, bytes.NewReader(bs))
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusConflict, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // normal cases
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(sqlmock.NewRows([]string{"id", "username", "name", "private"}))
+
+ sto := testutils.GetMockStorage()
+ sto.On("CreateChunkWrite", mock.Anything, mock.Anything).Return(mock.Anything, nil)
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", url, bytes.NewReader(bs))
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ raw := map[string]any{}
+ bs, err := io.ReadAll(w.Body)
+ suite.Nil(err)
+ err = json.Unmarshal(bs, &raw)
+ suite.Nil(err)
+ suite.NotNilf(raw["data"], "++++++ %v", raw)
+ data := raw["data"].(map[string]any)
+ uploadID := data["uploadID"].(string)
+ suite.NotEmpty(uploadID)
+ }
+}
+
+func (suite *imageTestSuite) TestUploadChunk() {
+ // gomonkey.ApplyFuncReturn(task.SendImageTask, nil)
+
+ digest, err := pkgutils.CalcDigestOfStr(testContent)
+ suite.Nil(err)
+
+ body := types.ImageCreateRequest{
+ Username: "user1",
+ Name: "name1",
+ Tag: "tag1",
+ Size: int64(len(testContent)),
+ Digest: digest,
+ Format: "qcow2",
+ OS: types.OSInfo{
+ Arch: "arm64",
+ Type: "linux",
+ Distrib: "ubuntu",
+ Version: "22.04",
+ },
+ }
+ bs, _ := json.Marshal(body)
+
+ chunkSize := 2
+ nChunks := int64(math.Ceil(float64(len(testContent)) / float64(chunkSize)))
+ {
+ utils.MockRedis.FlushAll()
+ // normal cases
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(sqlmock.NewRows([]string{"id", "username", "name", "private"}))
+
+ sto := testutils.GetMockStorage()
+ defer sto.AssertExpectations(suite.T())
+ sto.On("CreateChunkWrite", mock.Anything, mock.Anything).Return(mock.Anything, nil)
+
+ w := httptest.NewRecorder()
+ url := fmt.Sprintf("/api/v1/image/user1/name1/startChunkUpload?chunkSize=%d&nChunks=%d", chunkSize, nChunks)
+ req, _ := http.NewRequest("POST", url, bytes.NewReader(bs))
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ raw := map[string]any{}
+ bs, err := io.ReadAll(w.Body)
+ suite.Nil(err)
+ err = json.Unmarshal(bs, &raw)
+ suite.Nil(err)
+ data := raw["data"].(map[string]any)
+ uploadID := data["uploadID"].(string)
+
+ // upload chunks
+ nChunks := math.Ceil(float64(len(testContent)) / float64(chunkSize))
+ for cIdx := int(0); cIdx < int(nChunks); cIdx++ {
+ start, end := cIdx*chunkSize, (cIdx+1)*chunkSize
+ if end > len(testContent) {
+ end = len(testContent)
+ }
+
+ w = httptest.NewRecorder()
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+ part, err := writer.CreateFormFile("file", "/tmp/haha")
+ suite.Nil(err)
+ _, err = part.Write([]byte(testContent[start:end]))
+ suite.Nil(err)
+ writer.Close()
+
+ // copyCIdx := cIdx
+ // sto.On("ChunkWrite", mock.Anything, mock.Anything, mock.Anything, mock.MatchedBy(func(chunk *stotypes.ChunkInfo) bool {
+ // // AssertExpectations will call this function second time
+ // fmt.Printf("++++++%d => %v\n", copyCIdx, chunk)
+ // if chunk.Idx == 0 {
+ // return true
+ // }
+ // suite.Equal(chunk.Idx, copyCIdx)
+ // bs, err := io.ReadAll(chunk.In)
+ // suite.Nil(err)
+ // suite.Equal(testContent[start:end], string(bs))
+ // chunk.In = bytes.NewReader(bs)
+ // return true
+ // })).Return(nil).Once()
+
+ sto.On("ChunkWrite", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+ url := fmt.Sprintf("/api/v1/image/chunk/%d/upload?uploadID=%s", cIdx, uploadID)
+ req, _ = http.NewRequest("POST", url, body)
+ testutils.AddAuth(req, user, pass)
+ req.Header.Set("Content-Type", writer.FormDataContentType())
+
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equalf(http.StatusOK, w.Code, "error: %v", w.Body.String())
+ }
+
+ fmt.Printf("++++++ all chunk upload done\n")
+ suite.testMergeChunk(uploadID, digest)
+ }
+}
+
+func (suite *imageTestSuite) testMergeChunk(uploadID, digest string) {
+ user, pass := "user1", "pass1"
+ models.Mock.ExpectBegin()
+ models.Mock.ExpectExec("INSERT INTO repository(username, name, private) VALUES(?, ?, ?)").
+ WithArgs("user1", "name1", false).
+ WillReturnResult(sqlmock.NewResult(1234, 1))
+
+ models.Mock.ExpectExec("INSERT INTO image(repo_id, tag, labels, size, format, os, digest, snapshot, description) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)").
+ WithArgs(1234, "tag1", sqlmock.AnyArg(), len(testContent), "qcow2", sqlmock.AnyArg(), digest, sqlmock.AnyArg(), sqlmock.AnyArg()).
+ WillReturnResult(sqlmock.NewResult(1234, 1))
+ models.Mock.ExpectCommit()
+
+ sto := testutils.GetMockStorage()
+ // defer sto.AssertExpectations(suite.T())
+
+ sto.On("Move", mock.Anything, mock.Anything, mock.Anything).Return(nil)
+ sto.On("GetSize", mock.Anything, mock.Anything).Return(int64(len(testContent)), nil)
+ sto.On("GetDigest", mock.Anything, mock.Anything).Return(digest, nil)
+
+ sto.On("CompleteChunkWrite", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
+
+ w := httptest.NewRecorder()
+ url := fmt.Sprintf("/api/v1/image/chunk/merge?uploadID=%s", uploadID)
+ req, _ := http.NewRequest("POST", url, nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ fmt.Printf("+++++++++ merge: %s\n", w.Body.String())
+ suite.Equal(http.StatusOK, w.Code)
+}
diff --git a/internal/api/image/image.go b/internal/api/image/image.go
new file mode 100644
index 0000000..12fa37b
--- /dev/null
+++ b/internal/api/image/image.go
@@ -0,0 +1,917 @@
+package image
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strconv"
+ "sync"
+ "sync/atomic"
+
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/vmihub/internal/common"
+ storFact "github.com/projecteru2/vmihub/internal/storage/factory"
+ storTypes "github.com/projecteru2/vmihub/internal/storage/types"
+
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+ "github.com/projecteru2/vmihub/pkg/types"
+
+ "github.com/cockroachdb/errors"
+ "github.com/gin-gonic/gin"
+ "github.com/gin-gonic/gin/binding"
+ "github.com/mcuadros/go-defaults"
+ "github.com/panjf2000/ants/v2"
+ _ "github.com/projecteru2/vmihub/cmd/vmihub/docs" // for doc
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/redis/go-redis/v9"
+)
+
+const (
+ defaultTag = "latest"
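+ // Uploads of at least chunkThreshold bytes are written to storage in parallel chunks
+ // (see writeSingleFileWithChunk); smaller uploads go through a single Put.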
+ chunkThreshold = 4 * utils.GB
+)
+
+func SetupRouter(r *gin.RouterGroup) {
+ imageGroup := r.Group("/image")
+
+ repoGroup := r.Group("/repository")
+
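+ // Chunked upload flow (as exercised by the tests):
+ //   1. POST /image/{username}/{name}/startChunkUpload?chunkSize=...&nChunks=...  -> returns an uploadID
+ //   2. POST /image/chunk/{chunkIdx}/upload?uploadID=...                          (once per chunk)
+ //   3. POST /image/chunk/merge?uploadID=...                                      (merges the chunks)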
+ imageGroup.POST("/:username/:name/startChunkUpload", StartImageChunkUpload)
+ imageGroup.POST("/chunk/:chunkIdx/upload", UploadImageChunk)
+ imageGroup.POST("/chunk/merge", MergeChunk)
+ imageGroup.GET("/:username/:name/chunk/:chunkIdx/download", DownloadImageChunk)
+
+ // Get image information
+ imageGroup.GET("/:username/:name/info", GetImageInfo)
+ // download image file
+ imageGroup.GET("/:username/:name/download", DownloadImage)
+
+ // upload image file
+ imageGroup.POST("/:username/:name/startUpload", StartImageUpload)
+ imageGroup.POST("/:username/:name/upload", UploadImage)
+
+ // delete image info from db and file from storage
+ imageGroup.DELETE("/:username/:name", DeleteImage)
+
+ // Return repository list of current user
+ r.GET("/repositories", ListRepositories)
+ // List image
+ r.GET("/images", ListImages)
+ // Return image list of specified repository.
+ repoGroup.GET("/:username/:name/images", ListRepoImages)
+ repoGroup.DELETE("/:username/:name", DeleteRepository)
+}
+
+// ListRepositories get repository list of specified user or current user
+//
+// @Summary get repository list
+// @Description ListRepositories get repository list
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username query string false "用户名"
+// @Param page query int false "页码" default(1)
+// @Param pageSize query int false "每一页数量" default(10)
+// @Success 200
+// @Router /repositories [get]
+func ListRepositories(c *gin.Context) {
+ username := c.Query("username")
+ pNum := 1
+ page := c.DefaultQuery("page", "1")
+ pSize := 10
+ pageSize := c.DefaultQuery("pageSize", "10")
+ if page != "" {
+ pNum, _ = strconv.Atoi(page)
+ }
+ if pageSize != "" {
+ pSize, _ = strconv.Atoi(pageSize)
+ }
+
+ curUser, ok := common.LoginUser(c)
+ if !ok && username == "" {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "you need login or provide a username as query parameter.",
+ })
+ return
+ }
+ var repos []models.Repository
+ var err error
+ if username == "" {
+ repos, err = models.QueryRepoList(curUser.Username, pNum, pSize)
+ } else {
+ if curUser != nil && (curUser.Admin || curUser.Username == username) {
+ repos, err = models.QueryRepoList(username, pNum, pSize)
+ } else {
+ repos, err = models.QueryPublicRepoList(username, pNum, pSize)
+ }
+ }
+ if err != nil {
+ log.WithFunc("ListRepositories").Error(c, err, "Can't query repositories from database")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "msg": "internal error",
+ })
+ return
+ }
+
+ c.JSON(http.StatusOK, gin.H{
+ "data": repos,
+ })
+}
+
+// GetImageInfo get image meta info
+//
+// @Summary get image meta info
+// @Description GetImageInfo get image meta info
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "仓库用户名"
+// @Param name path string true "仓库名"
+// @Param tag query string false "镜像标签" default("latest")
+// @Success 200
+// @Router /image/{username}/{name}/info [get]
+func GetImageInfo(c *gin.Context) {
+ username := c.Param("username")
+ name := c.Param("name")
+ tag := c.DefaultQuery("tag", defaultTag)
+ err := validateRepoName(username, name)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": err,
+ })
+ return
+ }
+ repo, err := getRepo(c, username, name, "read")
+ if err != nil {
+ return
+ }
+ img, err := getRepoImage(c, repo, tag)
+ if err != nil {
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "success",
+ "data": convImageInfoResp(img),
+ })
+}
+
+// ListRepoImages get image list of repository
+//
+// @Summary get image list of specified repository
+// @Description ListRepoImages get image list of specified repo
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "用户名"
+// @Param name path string true "仓库名"
+// @Success 200
+// @Router /repository/{username}/{name}/images [get]
+func ListRepoImages(c *gin.Context) {
+ username := c.Param("username")
+ name := c.Param("name")
+ err := validateRepoName(username, name)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "msg": "invaid name",
+ })
+ return
+ }
+ repo, err := getRepo(c, username, name, "read")
+ if err != nil {
+ return
+ }
+
+ images, err := repo.GetImages()
+ if err != nil {
+ log.WithFunc("ListImageTags").Error(c, err, "Can't get image tags from database")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "msg": "query image error",
+ })
+ return
+ }
+
+ resp := make([]*types.ImageInfoResp, len(images))
+ for idx := 0; idx < len(images); idx++ {
+ resp[idx] = convImageInfoResp(&images[idx])
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "data": resp,
+ })
+}
+
+// DeleteRepository deletes the specified repository
+//
+// @Summary delete specified repository
+// @Description DeleteRepository
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "用户名"
+// @Param name path string true "仓库名"
+// @Success 200
+// @Router /repository/{username}/{name} [delete]
+func DeleteRepository(c *gin.Context) {
+ name := c.Param("name")
+ username := c.Param("username")
+
+ err := validateRepoName(username, name)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid name"})
+ return
+ }
+ repo, err := getRepo(c, username, name, "write")
+ if err != nil {
+ return
+ }
+
+ images, err := repo.GetImages()
+ if err != nil {
+ log.WithFunc("DeleteRepository").Error(c, err, "internal error")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "msg": "internal error",
+ })
+ return
+ }
+ stor := storFact.Instance()
+ for _, img := range images {
+ err = stor.Delete(context.Background(), img.Fullname(), true)
+ if err != nil {
+ // Best-effort cleanup, so just log the error
+ log.WithFunc("DeleteRepository").Errorf(c, err, "failed to remove image %s from storage", img.Fullname())
+ }
+ }
+
+ tx, err := models.Instance().Beginx()
+ if err != nil {
+ log.WithFunc("DeleteRepository").Error(c, err, "failed to get transaction")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "msg": "internal error",
+ })
+ return
+ }
+ if err = repo.Delete(tx); err != nil {
+ _ = tx.Rollback()
+ log.WithFunc("DeleteRepository").Error(c, err, "failed to delete repository")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "msg": "internal error",
+ })
+ return
+ }
+ _ = tx.Commit()
+
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "delete success",
+ "data": "",
+ })
+}
+
+// StartImageUpload starts a single-file upload session
+//
+// @Summary start image upload
+// @Description StartImageUpload starts a single-file upload session
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "用户名"
+// @Param name path string true "镜像名"
+// @Param force query bool false "强制上传(覆盖)" default("false")
+// @Param body body types.ImageCreateRequest true "镜像配置"
+// @Success 200
+// @Router /image/{username}/{name}/startUpload [post]
+func StartImageUpload(c *gin.Context) {
+ logger := log.WithFunc("StartImageUpload")
+ username := c.Param("username")
+ name := c.Param("name")
+ force := utils.GetBooleanQuery(c, "force", false)
+ var req types.ImageCreateRequest
+ defaults.SetDefaults(&req)
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ if err := req.Check(); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ tag := utils.NormalizeTag(req.Tag, req.Digest)
+ if err := validateRepoName(username, name); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid name"})
+ return
+ }
+ repo, img, err := common.GetRepoImageForUpload(c, username, name, tag)
+ if err != nil {
+ return
+ }
+
+ // if img exists and not force update, upload failed!
+ if img != nil && !force {
+ c.AbortWithStatusJSON(http.StatusConflict, gin.H{
+ "error": "Upload failed, image already exists. You can use force upload to overwrite.",
+ })
+ return
+ }
+
+ if repo == nil {
+ repo = &models.Repository{
+ Username: username,
+ Name: name,
+ Private: req.Private,
+ }
+ }
+ if img == nil {
+ reqLabels := models.Labels{}
+ if req.Labels != nil {
+ reqLabels = models.Labels(req.Labels)
+ }
+ img = &models.Image{
+ RepoID: repo.ID,
+ Tag: tag,
+ Labels: models.NewJSONColumn(&reqLabels),
+ Size: req.Size,
+ Digest: req.Digest,
+ Format: req.Format,
+ OS: models.NewJSONColumn(&req.OS),
+ Description: req.Description,
+ Repo: repo,
+ }
+
+ }
+
+ if req.URL != "" {
+ if err := processRemoteImageFile(c, img, req.URL); err != nil {
+ return
+ }
+ // logger.Debugf(c, "send image task")
+ // if err := task.SendImageTask(img.ID, force); err != nil {
+ // logger.Warnf(c, "failed to send image preparation task")
+ // }
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "upload remote file successfully",
+ "data": map[string]any{
+ "uploadID": "",
+ },
+ })
+ return
+ }
+ uploadID, err := newUploadID()
+ if err != nil {
+ logger.Error(c, err, "failed to generate upload id")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err})
+ return
+ }
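+ // Stash the pending image metadata in redis under this uploadID; UploadImage
+ // reads it back when the actual file arrives.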
+ rdb := utils.GetRedisConn()
+ bs, _ := json.Marshal(img)
+ logger.Debugf(c, "uploadID : %s", uploadID)
+ if err := rdb.HSet(
+ c, fmt.Sprintf(redisInfoKey, uploadID),
+ redisImageHKey, string(bs),
+ redisForceHKey, strconv.FormatBool(force),
+ ).Err(); err != nil {
+ logger.Error(c, err, "Failed to set image information to redis")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "Failed to set image information to redis",
+ })
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "data": map[string]any{
+ "uploadID": uploadID,
+ },
+ })
+}
+
+// UploadImage upload image
+//
+// @Summary upload image
+// @Description UploadImage upload image
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "仓库用户名"
+// @Param name path string true "仓库名"
+// @Param force query bool false "强制上传(覆盖)" default("false")
+// @Param file formData file true "文件"
+// @Success 200
+// @Router /image/{username}/{name}/upload [post]
+func UploadImage(c *gin.Context) {
+ logger := log.WithFunc("UploadImage")
+ username := c.Param("username")
+ name := c.Param("name")
+
+ uploadID := c.Query("uploadID")
+ if uploadID == "" {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "empty uploadID"})
+ return
+ }
+ err := validateRepoName(username, name)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("invaid name(%s)", err),
+ })
+ return
+ }
+
+ // upload single file
+ file, err := c.FormFile("file")
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("invalid request: bad file(%s)", err),
+ })
+ return
+ }
+
+ fileOpen, err := file.Open()
+ if err != nil {
+ logger.Error(c, err, "Failed to open FileHeader")
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "bad request",
+ })
+ return
+ }
+ defer fileOpen.Close()
+
+ rdb := utils.GetRedisConn()
+ kv, err := rdb.HGetAll(c, fmt.Sprintf(redisInfoKey, uploadID)).Result()
+ if err == redis.Nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "you should start image upload first",
+ })
+ return
+ }
+ if err != nil {
+ logger.Error(c, err, "Failed to get upload information from redis")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+ logger.Debugf(c, "uploadID : %s", uploadID)
+ img := &models.Image{}
+ // var force bool
+ for k, v := range kv {
+ switch k {
+ case redisImageHKey:
+ err = json.Unmarshal([]byte(v), &img)
+ case redisForceHKey:
+ // force, err = strconv.ParseBool(v)
+ _, err = strconv.ParseBool(v)
+ default:
+ err = errors.Newf("unknown redis hash key %s", k)
+ }
+ if err != nil {
+ logger.Errorf(c, err, "incorrect redis value: %s %s", k, v)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+ }
+ if img.Repo == nil {
+ logger.Errorf(c, err, "there is no image info in redis for uploadID: %s", uploadID)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+
+ // We write the upload to a temporary file here because very large files are re-uploaded
+ // to storage in chunks. Chunked uploads are parallelized to speed things up, which needs
+ // concurrent open/seek, and that is much easier with a local file.
+ fp, err := os.CreateTemp("/tmp", "image-upload-")
+ if err != nil {
+ logger.Error(c, err, "failed to create temp file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return
+ }
+ defer os.Remove(fp.Name())
+ defer fp.Close()
+
+ nwritten, err := io.Copy(fp, fileOpen)
+ if err != nil {
+ logger.Errorf(c, err, "failed to save upload file to local")
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "bad request"})
+ return
+ }
+ if err = writeDataToStorage(c, img, fp.Name(), nwritten); err != nil {
+ return
+ }
+
+ // logger.Debugf(c, "send image task")
+ // if err := task.SendImageTask(img.ID, force); err != nil {
+ // logger.Warnf(c, "failed to send image preparation task")
+ // }
+
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "upload image successfully",
+ })
+}
+
+// DownloadImage download image
+//
+// @Summary download image
+// @Description DownloadImage download image
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "仓库用户名"
+// @Param name path string true "仓库名"
+// @Param tag query string false "镜像标签" default("latest")
+// @Success 200
+// @Router /image/{username}/{name}/download [get]
+func DownloadImage(c *gin.Context) {
+ username := c.Param("username")
+ name := c.Param("name")
+
+ tag := c.DefaultQuery("tag", "latest")
+ err := validateRepoName(username, name)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid name"})
+ return
+ }
+ repo, err := getRepo(c, username, name, "read")
+ if err != nil {
+ return
+ }
+ img, err := getRepoImage(c, repo, tag)
+ if err != nil {
+ return
+ }
+ if img.Format == models.ImageFormatRBD {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "image created from system disk doesn't support download"})
+ return
+ }
+ sto := storFact.Instance()
+
+ file, err := sto.Get(context.Background(), img.Fullname())
+ if err != nil {
+ log.WithFunc("DownloadImage").Error(c, err, "Failed to get image file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "msg": "Failed to get image file",
+ })
+ return
+ }
+ defer file.Close()
+
+ c.Header("Content-Type", "application/octet-stream")
+ c.Header("Content-Disposition", "attachment; filename="+img.Fullname())
+ c.Header("Content-Length", fmt.Sprintf("%d", img.Size))
+
+ // write content to response
+ _, err = io.Copy(c.Writer, file)
+ if err != nil {
+ log.WithFunc("DownloadImage").Error(c, err, "Failed to get copy file form storage")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "Failed to download file"})
+ return
+ }
+}
+
+// DeleteImage delete image
+//
+// @Summary delete image
+// @Description DeleteImage delete image
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param username path string true "仓库用户名"
+// @Param name path string true "仓库名"
+// @Param tag query string false "镜像标签" default("latest")
+// @Success 200
+// @Router /image/{username}/{name} [delete]
+func DeleteImage(c *gin.Context) {
+ logger := log.WithFunc("DeleteImage")
+ name := c.Param("name")
+ username := c.Param("username")
+ tag := c.DefaultQuery("tag", defaultTag)
+
+ err := validateRepoName(username, name)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid name"})
+ return
+ }
+ repo, err := getRepo(c, username, name, "write")
+ if err != nil {
+ return
+ }
+ img, err := getRepoImage(c, repo, tag)
+ if err != nil {
+ return
+ }
+
+ sto := storFact.Instance()
+ if err = sto.Delete(c, img.Fullname(), true); err != nil {
+ // Best-effort cleanup, so just log the error
+ logger.Errorf(c, err, "failed to remove image %s from storage", img.Fullname())
+ }
+ if err = repo.DeleteImage(nil, img.Tag); err != nil {
+ logger.Error(c, err, "failed to delete image")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "delete image successfully",
+ })
+}
+
+// ListImages get image list of current user or specified user
+//
+// @Summary get image list
+// @Description ListImages get images list
+// @Tags 镜像管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param keyword query string false "搜索关键字" default()
+// @Param username query string false "用户名"
+// @Param page query int false "页码" default(1)
+// @Param pageSize query int false "每一页数量" default(10)
+// @success 200 {object} types.JSONResult{data=[]types.ImageInfoResp} "desc"
+// @Router /images [get]
+func ListImages(c *gin.Context) {
+ logger := log.WithFunc("ListImages")
+ username := c.Query("username")
+ pNum := 1
+ page := c.DefaultQuery("page", "1")
+ pSize := 10
+ pageSize := c.DefaultQuery("pageSize", "10")
+ keyword := c.DefaultQuery("keyword", "")
+ if page != "" {
+ pNum, _ = strconv.Atoi(page)
+ }
+ if pageSize != "" {
+ pSize, _ = strconv.Atoi(pageSize)
+ }
+ if pNum <= 0 || pSize <= 0 {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid page or page size"})
+ return
+ }
+
+ curUser, ok := common.LoginUser(c)
+ if !ok && username == "" {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "you need login or provide a username as query parameter.",
+ })
+ return
+ }
+ var (
+ imgs []models.Image
+ total int
+ err error
+ )
+ if username == "" {
+ username = curUser.Username
+ }
+ regionCode := c.DefaultQuery("regionCode", "ap-yichang-1")
+ req := types.ImagesByUsernameRequest{Username: username, Keyword: keyword, PageNum: pNum, PageSize: pSize, RegionCode: regionCode}
+ if curUser != nil && (curUser.Admin || curUser.Username == username) {
+ imgs, total, err = models.QueryImagesByUsername(req)
+ } else {
+ imgs, total, err = models.QueryPublicImagesByUsername(req)
+ }
+ if err != nil {
+ logger.Error(c, err, "Can't query images from database")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+ resps, err := convImageListResp(nil, imgs)
+ if err != nil {
+ logger.Error(c, err, "failed to conv images repsonses")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "data": resps,
+ "total": total,
+ })
+}
+
+func writeDataToStorage(c *gin.Context, img *models.Image, fname string, size int64) (err error) {
+ logger := log.WithFunc("writeDataToStorage")
+ logger.Debugf(c, "starting to write file to storage, size %d", size)
+ defer func() { logger.Debugf(c, "exit writing file to storage, err: %v", err) }()
+
+ if size < chunkThreshold {
+ if err := writeSingleFile(c, img, fname); err != nil {
+ return err
+ }
+ } else {
+ if err := writeSingleFileWithChunk(c, img, fname, size); err != nil {
+ return err
+ }
+ }
+
+ repo := img.Repo
+ tx, err := models.Instance().Beginx()
+ if err != nil {
+ logger.Error(c, err, "failed to get transaction")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return err
+ }
+ // Write (or overwrite) the image metadata in the database
+ if err = repo.Save(tx); err != nil {
+ logger.Error(c, err, "failed to save repository to db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return err
+ }
+ if err = repo.SaveImage(tx, img); err != nil {
+ logger.Error(c, err, "failed to save image to db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return err
+ }
+ if err := tx.Commit(); err != nil {
+ logger.Error(c, err, "failed to commit transaction")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return err
+ }
+
+ return nil
+}
+
+func processRemoteImageFile(c *gin.Context, img *models.Image, url string) error {
+ logger := log.WithFunc("processRremoteImageFile")
+ resp, err := http.Get(url) //nolint
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("failed to download remote file %s", url)})
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "failed to download url"})
+ return errors.Newf("failed to get remote file, http code: %s", resp.StatusCode)
+ }
+ fp, err := os.CreateTemp("/tmp", "download-remote-")
+ if err != nil {
+ logger.Error(c, err, "failed to create temp file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return err
+ }
+ defer os.Remove(fp.Name())
+ defer fp.Close()
+
+ h := sha256.New()
+ reader := io.TeeReader(resp.Body, h)
+ nwritten, err := io.Copy(fp, reader)
+ if err != nil {
+ logger.Error(c, err, "failed to download file")
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "failed to download url"})
+ return err
+ }
+ if err := fp.Sync(); err != nil {
+ logger.Error(c, err, "failed to sync file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return err
+ }
+ // check size
+ if img.Size == 0 {
+ img.Size = nwritten
+ }
+ if img.Size != nwritten {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "size mismatch"})
+ return terrors.ErrPlaceholder
+ }
+ // check digest
+ digest := fmt.Sprintf("%x", h.Sum(nil))
+ if img.Digest == "" {
+ img.Digest = digest
+ }
+ if digest != img.Digest {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "digest mismatch"})
+ return terrors.ErrPlaceholder
+ }
+ if img.Tag == utils.FakeTag {
+ img.Tag = digest[:10]
+ }
+ // set file pointer to start
+ if _, err := fp.Seek(0, 0); err != nil {
+ logger.Error(c, err, "failed to seek file")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return err
+ }
+ return writeDataToStorage(c, img, fp.Name(), nwritten)
+}
+
+func writeSingleFileWithChunk(c *gin.Context, img *models.Image, fname string, size int64) error {
+ logger := log.WithFunc("writeSingleFileWithChunk")
+ sto := storFact.Instance()
+ uploadID, err := sto.CreateChunkWrite(c, img.Fullname())
+ if err != nil {
+ logger.Error(c, err, "Failed to save file to storage [CreateChunkWrite]")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return err
+ }
+ chunkSize := int64(300 * utils.MB)
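+ // ceiling division: round up so the final, possibly shorter, chunk is included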
+ nChunks := (size + chunkSize - 1) / chunkSize
+ chunkList := make([]*storTypes.ChunkInfo, nChunks)
+ errList := make([]error, nChunks)
+ var (
+ hasErr atomic.Bool
+ wg sync.WaitGroup
+ )
+ logger.Debug(c, "start uploading chunks, hasErr: %v", hasErr.Load())
+ p, _ := ants.NewPoolWithFunc(30, func(i any) {
+ defer wg.Done()
+ chunkIdx := i.(int64) //nolint
+ fp, err := os.Open(fname)
+ if err != nil {
+ errList[chunkIdx] = errors.Wrapf(err, "failed to open file %s", fname)
+ hasErr.Store(true)
+ return
+ }
+ defer fp.Close()
+
+ logger.Debugf(c, "write chunk %d", chunkIdx)
+ curSize := chunkSize
+ if chunkIdx == nChunks-1 {
+ curSize = size - chunkIdx*chunkSize
+ }
+ sReader := io.NewSectionReader(fp, chunkIdx*chunkSize, curSize)
+ cInfo := &storTypes.ChunkInfo{
+ Idx: int(chunkIdx),
+ Size: curSize,
+ ChunkSize: chunkSize,
+ Digest: "",
+ In: sReader,
+ }
+ chunkList[chunkIdx] = cInfo
+ if err = sto.ChunkWrite(c, img.Fullname(), uploadID, cInfo); err != nil {
+ errList[chunkIdx] = errors.Wrapf(err, "failed to write chunk %d", chunkIdx)
+ logger.Errorf(c, errList[chunkIdx], "failed to write chunk %d", chunkIdx)
+ hasErr.Store(true)
+ return
+ }
+ })
+ defer p.Release()
+ for chunkIdx := int64(0); chunkIdx < nChunks; chunkIdx++ {
+ wg.Add(1)
+ if err = p.Invoke(chunkIdx); err != nil {
+ logger.Errorf(c, err, "failed to submit pool task %d", chunkIdx)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return err
+ }
+ }
+ wg.Wait()
+
+ logger.Debug(c, "finished uploading chunks, hasErr: %v", hasErr.Load())
+ if hasErr.Load() {
+ var outErr error
+ for idx := range errList {
+ outErr = errors.CombineErrors(outErr, errList[idx])
+ }
+ logger.Error(c, outErr, "Failed to save file to storage [ChunkWrite]")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return outErr
+ }
+ logger.Debug(c, "completing chunk write")
+ err = sto.CompleteChunkWrite(c, img.Fullname(), uploadID, chunkList)
+ if err != nil {
+ logger.Error(c, err, "Failed to save file to storage [CompleteChunkWrite]")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return err
+ }
+
+ return nil
+}
+
+func writeSingleFile(c *gin.Context, img *models.Image, fname string) error {
+ logger := log.WithFunc("writeSingleFile")
+ digest := img.Digest
+ sto := storFact.Instance()
+ fp, err := os.Open(fname)
+ if err != nil {
+ return err
+ }
+ defer fp.Close()
+ if err := sto.Put(c, img.Fullname(), digest, fp); err != nil {
+ logger.Error(c, err, "Failed to save file to storage")
+ if errors.Is(err, terrors.ErrInvalidDigest) {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("invalid digest: got: %s, user passed: %s", img.Digest, digest),
+ })
+ } else {
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ }
+ return terrors.ErrPlaceholder
+ }
+ return nil
+}
diff --git a/internal/api/image/image_test.go b/internal/api/image/image_test.go
new file mode 100644
index 0000000..f5cd673
--- /dev/null
+++ b/internal/api/image/image_test.go
@@ -0,0 +1,455 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "mime/multipart"
+ "net/http"
+ "net/http/httptest"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/gin-gonic/gin"
+ "github.com/projecteru2/vmihub/internal/middlewares"
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/internal/testutils"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/types"
+ pkgutils "github.com/projecteru2/vmihub/pkg/utils"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+const (
+ testContent = "test content"
+)
+
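+// The table and column names below come from the models themselves, so the sqlmock
+// query expectations in these tests stay in sync with the SQL the handlers issue.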
+var (
+ repoTableName = ((*models.Repository)(nil)).TableName()
+ repoColumns = ((*models.Repository)(nil)).ColumnNames()
+ imgTableName = ((*models.Image)(nil)).TableName()
+ imgColumns = ((*models.Image)(nil)).ColumnNames()
+)
+
+type imageTestSuite struct {
+ suite.Suite
+ r *gin.Engine
+}
+
+// func (suite *imageTestSuite) SetupSuite() {
+// gomonkey.ApplyFuncReturn(task.SendImageTask, nil)
+// }
+
+func (suite *imageTestSuite) SetupTest() {
+ t := suite.T()
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer cancel()
+ err := testutils.Prepare(ctx, t)
+ require.NoError(t, err)
+
+ r, err := testutils.PrepareGinEngine()
+ require.NoError(t, err)
+ apiGroup := r.Group("/api/v1", middlewares.Authenticate())
+
+ SetupRouter(apiGroup)
+ suite.r = r
+}
+
+func (suite *imageTestSuite) TestGetRepoList() {
+ {
+ utils.MockRedis.FlushAll()
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/repositories", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusBadRequest, w.Code)
+ }
+ {
+ // anonymous user
+ // prepare db data
+ utils.MockRedis.FlushAll()
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name"}).
+ AddRow(1, "user1", "name1").
+ AddRow(2, "user1", "name2")
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND private = ? ORDER BY updated_at DESC LIMIT ?, ?", repoColumns, repoTableName)).
+ WithArgs("user1", false, 0, 10).
+ WillReturnRows(wantRows)
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/repositories?username=user1", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ raw := map[string]any{}
+ err := json.Unmarshal(w.Body.Bytes(), &raw)
+ suite.Nil(err)
+ bs, err := json.Marshal(raw["data"])
+ suite.Nil(err)
+ repos := []models.Repository{}
+ err = json.Unmarshal(bs, &repos)
+ suite.Nil(err)
+ suite.Len(repos, 2)
+ suite.Equal("user1", repos[0].Username)
+ suite.Equal("name1", repos[0].Name)
+ suite.Equal("user1", repos[1].Username)
+ suite.Equal("name2", repos[1].Name)
+ }
+ {
+ // logged-in user
+ utils.MockRedis.FlushAll()
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ // prepare db data
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name"}).
+ AddRow(1, "user1", "name1").
+ AddRow(2, "user1", "name2")
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? ORDER BY updated_at DESC LIMIT ?, ?", repoColumns, repoTableName)).
+ WithArgs("user1", 0, 10).
+ WillReturnRows(wantRows)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/repositories", nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ }
+}
+
+func (suite *imageTestSuite) TestGetImageInfo() {
+ {
+ // repository doesn't exist
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(sqlmock.NewRows([]string{"id", "username", "name", "private"}))
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/info", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusNotFound, w.Code)
+ }
+ {
+ // anonymous user can't read private image
+ utils.MockRedis.FlushAll()
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/info", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusForbidden, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+
+ // image doesn't exist
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? ORDER BY created_at DESC LIMIT 1", imgColumns, imgTableName)).
+ WithArgs(1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "repo_id", "tag"}))
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/info", nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusNotFound, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+
+ wantRows = sqlmock.NewRows([]string{"id", "repo_id", "tag", "os", "created_at"}).
+ AddRow(2, 1, "tag1", []byte("{}"), time.Now())
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? AND tag = ?", imgColumns, imgTableName)).
+ WithArgs(1, "tag1").
+ WillReturnRows(wantRows)
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/info?tag=tag1", nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ raw := map[string]any{}
+ err = json.Unmarshal(w.Body.Bytes(), &raw)
+ suite.Nil(err)
+ var resp types.ImageInfoResp
+ bs, _ := json.Marshal(raw["data"])
+ err = json.Unmarshal(bs, &resp)
+ suite.Nil(err)
+ suite.Equal(resp.Username, "user1")
+ suite.Equal(resp.Name, "name1")
+ }
+}
+
+func (suite *imageTestSuite) TestCache() {
+ utils.MockRedis.FlushAll()
+ {
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", false)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+ wantRows = sqlmock.NewRows([]string{"id", "repo_id", "tag", "os"}).
+ AddRow(2, 1, "tag1", []byte("{}"))
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? ORDER BY created_at DESC LIMIT 1", imgColumns, imgTableName)).
+ WithArgs(1).
+ WillReturnRows(wantRows)
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/info", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ raw := map[string]any{}
+ err := json.Unmarshal(w.Body.Bytes(), &raw)
+ suite.Nil(err)
+ var resp types.ImageInfoResp
+ bs, _ := json.Marshal(raw["data"])
+ err = json.Unmarshal(bs, &resp)
+ suite.Nil(err)
+ suite.Equal(resp.Username, "user1")
+ suite.Equal(resp.Name, "name1")
+ }
+ {
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/info?tag=tag1", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ raw := map[string]any{}
+ err := json.Unmarshal(w.Body.Bytes(), &raw)
+ suite.Nil(err)
+ var resp types.ImageInfoResp
+ bs, _ := json.Marshal(raw["data"])
+ err = json.Unmarshal(bs, &resp)
+ suite.Nil(err)
+ suite.Equal(resp.Username, "user1")
+ suite.Equal(resp.Name, "name1")
+ suite.Equal(resp.Tag, "tag1")
+ }
+}
+
+func (suite *imageTestSuite) TestDownloadImage() {
+ {
+ utils.MockRedis.FlushAll()
+ // anonymous user can't download private image
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/download", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusForbidden, w.Code)
+ // suite.Equal("ok", w.Body.String())
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // a user can't read a private image that belongs to another user
+ user, pass := "user2", "pass2"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/download", nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusForbidden, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // normal case
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name", "private"}).
+ AddRow(1, "user1", "name1", true)
+
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(wantRows)
+
+ wantRows = sqlmock.NewRows([]string{"id", "repo_id", "tag"}).
+ AddRow(2, 1, "tag1")
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? AND tag = ?", imgColumns, imgTableName)).
+ WithArgs(1, "tag1").
+ WillReturnRows(wantRows)
+ sto := testutils.GetMockStorage()
+ defer sto.AssertExpectations(suite.T())
+
+ sto.On("Get", mock.Anything, mock.Anything).Return(io.NopCloser(bytes.NewBufferString(testContent)), nil).Once()
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/api/v1/image/user1/name1/download?tag=tag1", nil)
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ suite.Equal(testContent, w.Body.String())
+ }
+}
+
+func (suite *imageTestSuite) TestUploadImage() {
+ digest, err := pkgutils.CalcDigestOfStr(testContent)
+ suite.Nil(err)
+
+ // gomonkey.ApplyFuncReturn(task.SendImageTask, nil)
+
+ body := types.ImageCreateRequest{
+ Username: "user1",
+ Name: "name1",
+ Tag: "tag1",
+ Size: int64(len(testContent)),
+ Digest: digest,
+ Format: "qcow2",
+ OS: types.OSInfo{
+ Arch: "arm64",
+ Type: "linux",
+ Distrib: "ubuntu",
+ Version: "22.04",
+ },
+ }
+ bs, _ := json.Marshal(body)
+
+ {
+ utils.MockRedis.FlushAll()
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/api/v1/image/user1/name1/startUpload", nil)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusBadRequest, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/api/v1/image/user1/name1/startUpload", bytes.NewReader(bs))
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusUnauthorized, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // a user can't write an image that belongs to another user
+ user, pass := "user2", "pass2"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/api/v1/image/user1/name1/startUpload", bytes.NewReader(bs))
+ testutils.AddAuth(req, user, pass)
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusForbidden, w.Code)
+ }
+ {
+ utils.MockRedis.FlushAll()
+ // normal case
+ user, pass := "user1", "pass1"
+ err := testutils.PrepareUserData(user, pass)
+ suite.Nil(err)
+ models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", repoColumns, repoTableName)).
+ WithArgs("user1", "name1").
+ WillReturnRows(sqlmock.NewRows([]string{"id", "username", "name", "private"}))
+
+ // models.Mock.ExpectQuery("SELECT * FROM image WHERE repo_id = ? AND tag = ?").
+ // WithArgs(1, "latest").
+ // WillReturnRows(sqlmock.NewRows([]string{"id", "repo_id", "tag"}))
+
+ models.Mock.ExpectBegin()
+ models.Mock.ExpectExec("INSERT INTO repository(username, name, private) VALUES(?, ?, ?)").
+ WithArgs("user1", "name1", false).
+ WillReturnResult(sqlmock.NewResult(1234, 1))
+
+ osBytes, _ := json.Marshal(body.OS)
+ models.Mock.ExpectExec("INSERT INTO image(repo_id, tag, labels, size, format, os, digest, snapshot, description) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)").
+ WithArgs(1234, "tag1", sqlmock.AnyArg(), len(testContent), "qcow2", osBytes, digest, sqlmock.AnyArg(), sqlmock.AnyArg()).
+ WillReturnResult(sqlmock.NewResult(1234, 1))
+ models.Mock.ExpectCommit()
+
+ stor := testutils.GetMockStorage()
+ defer stor.AssertExpectations(suite.T())
+
+ var once sync.Once
+ stor.On("Put", mock.Anything, mock.Anything, mock.Anything, mock.MatchedBy(func(reader io.ReadSeeker) bool {
+ // AssertExpectations will call this function second time
+ once.Do(func() {
+ bs, err := io.ReadAll(reader)
+ suite.Nil(err)
+ suite.Equal(testContent, string(bs))
+ })
+ return true
+ })).Return(nil)
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/api/v1/image/user1/name1/startUpload", bytes.NewReader(bs))
+ testutils.AddAuth(req, user, pass)
+ // req.Header.Set("Content-Type", writer.FormDataContentType())
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equal(http.StatusOK, w.Code)
+ raw := map[string]any{}
+ err = json.Unmarshal(w.Body.Bytes(), &raw)
+ suite.Nil(err)
+ var resp map[string]string
+ bs, _ := json.Marshal(raw["data"])
+ err = json.Unmarshal(bs, &resp)
+ suite.Nil(err)
+ uploadID := resp["uploadID"]
+ suite.True(len(uploadID) > 0)
+
+ w = httptest.NewRecorder()
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+ part, err := writer.CreateFormFile("file", "/tmp/haha")
+ suite.Nil(err)
+ _, err = part.Write([]byte(testContent))
+ suite.Nil(err)
+ writer.Close()
+
+ req, _ = http.NewRequest("POST", fmt.Sprintf("/api/v1/image/user1/name1/upload?uploadID=%s", uploadID), body)
+ testutils.AddAuth(req, user, pass)
+ req.Header.Set("Content-Type", writer.FormDataContentType())
+ suite.r.ServeHTTP(w, req)
+
+ suite.Equalf(http.StatusOK, w.Code, "error: %s", w.Body.String())
+ }
+}
+
+func (suite *imageTestSuite) TestDeleteImage() {
+}
+
+func TestImageTestSuite(t *testing.T) {
+ suite.Run(t, new(imageTestSuite))
+}
diff --git a/internal/api/image/util.go b/internal/api/image/util.go
new file mode 100644
index 0000000..94b1cb0
--- /dev/null
+++ b/internal/api/image/util.go
@@ -0,0 +1,216 @@
+package image
+
+import (
+ "encoding/hex"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/cockroachdb/errors"
+ "github.com/dustin/go-humanize"
+ "github.com/gin-gonic/gin"
+ "github.com/google/uuid"
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/vmihub/internal/common"
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+ "github.com/projecteru2/vmihub/pkg/types"
+)
+
+func newUploadID() (string, error) {
+ raw, err := uuid.NewUUID()
+ if err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(raw[:]), nil
+}
+
+func checkRepoReadPerm(c *gin.Context, repo *models.Repository) bool {
+ if !repo.Private {
+ return true
+ }
+ curUser, exists := common.LoginUser(c)
+ if !exists {
+ return false
+ }
+ if curUser.Admin {
+ return true
+ }
+ return strings.EqualFold(curUser.Username, repo.Username)
+}
+
+func checkRepoWritePerm(c *gin.Context, repo *models.Repository) bool {
+ curUser, exists := common.LoginUser(c)
+ if !exists {
+ return false
+ }
+ if curUser.Admin {
+ return true
+ }
+ return strings.EqualFold(curUser.Username, repo.Username)
+}
+
+func getRepo(c *gin.Context, username, name string, perm string) (repo *models.Repository, err error) {
+ repo, err = models.QueryRepo(c, username, name)
+ if err != nil {
+ log.WithFunc("getRepo").Error(c, err, "failed to get repo from db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return
+ }
+ if repo == nil {
+ c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
+ "error": "image doesn't exist",
+ })
+ err = errors.New("placeholder")
+ return
+ }
+
+ switch perm {
+ case "read":
+ if !checkRepoReadPerm(c, repo) {
+ c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
+ "error": "you don't have perssion",
+ })
+ err = errors.New("placeholder")
+ return
+ }
+ case "write":
+ if !checkRepoWritePerm(c, repo) {
+ c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
+ "error": "you don't have perssion",
+ })
+ err = errors.New("placeholder")
+ return
+ }
+ }
+ return
+}
+
+func getRepoImage(c *gin.Context, repo *models.Repository, tag string) (img *models.Image, err error) {
+ img, err = repo.GetImage(c, tag)
+ if err != nil {
+ log.WithFunc("getRepoImage").Errorf(c, err, "failed to get image %s:%s from db", repo.Fullname(), tag)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "msg": "internal error",
+ })
+ return nil, err
+ }
+ if img == nil {
+ c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
+ "msg": "image doesn't exist",
+ })
+ return nil, errors.New("placeholder")
+ }
+ return img, nil
+}
+
+func validateRepoName(username, name string) (err error) {
+ if username == "" {
+ return fmt.Errorf("empty username")
+ }
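+ // A username of "_" is treated as a placeholder, so only the image name itself is validated.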
+ if username == "_" {
+ return checkNames(name)
+ } else { //nolint:revive
+ return checkNames(username, name)
+ }
+}
+
+func checkNames(names ...string) (err error) {
+ for _, p := range names {
+ var matched bool
+ matched, err = regexp.MatchString(utils.NameRegex, p) //nolint
+ if err != nil {
+ return
+ }
+ if !matched {
+ err = fmt.Errorf("invalid name %s", p)
+ return
+ }
+ }
+ return
+}
+
+func validateParamTag(tag string) string {
+ if tag == "" {
+ tag = defaultTag
+ }
+ return tag
+}
+
+func validateChunkSize(c *gin.Context, chunkSize string) error {
+ if chunkSize == "" {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "empty chunkSize",
+ })
+ return terrors.ErrPlaceholder
+ }
+ if _, err := humanize.ParseBytes(chunkSize); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid chunk size"})
+ return terrors.ErrPlaceholder
+ }
+ return nil
+}
+
+func validateNChunks(c *gin.Context, nChunks string) error {
+ if nChunks == "" {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "empty nChunks",
+ })
+ return terrors.ErrPlaceholder
+ }
+ n, err := strconv.Atoi(nChunks)
+ if err != nil || n <= 0 {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": fmt.Sprintf("invalid nChunks %s", nChunks),
+ })
+ return terrors.ErrPlaceholder
+ }
+ return nil
+}
+
+func convImageInfoResp(img *models.Image) *types.ImageInfoResp {
+ resp := &types.ImageInfoResp{
+ ID: img.ID,
+ RepoID: img.RepoID,
+ Username: img.Repo.Username,
+ Name: img.Repo.Name,
+ Tag: img.Tag,
+ Private: img.Repo.Private,
+ Format: img.Format,
+ OS: *img.OS.Get(),
+ Size: img.Size,
+ Digest: img.Digest,
+ Snapshot: img.Snapshot,
+ Description: img.Description,
+ CreatedAt: img.CreatedAt,
+ UpdatedAt: img.UpdatedAt,
+ }
+ return resp
+}
+
+func convImageListResp(repos []models.Repository, imgs []models.Image) ([]*types.ImageInfoResp, error) {
+ repoMap := map[int64]*models.Repository{}
+ for idx := range repos {
+ repoMap[repos[idx].ID] = &repos[idx]
+ }
+ resps := make([]*types.ImageInfoResp, 0, len(imgs))
+ for idx := range imgs {
+ img := &imgs[idx]
+ repo := repoMap[img.RepoID]
+ if repo == nil {
+ repo = img.Repo
+ }
+ if repo == nil {
+ return nil, errors.Newf("not repo found for image %v", img)
+ }
+ img.Repo = repo
+ resp := convImageInfoResp(img)
+ resps = append(resps, resp)
+ }
+ return resps, nil
+}
diff --git a/internal/api/router.go b/internal/api/router.go
new file mode 100644
index 0000000..94c7e25
--- /dev/null
+++ b/internal/api/router.go
@@ -0,0 +1,63 @@
+package api
+
+import (
+ "context"
+ "net/http"
+
+ ginI18n "github.com/gin-contrib/i18n"
+ "github.com/gin-contrib/sessions"
+ "github.com/gin-gonic/gin"
+ swaggerFiles "github.com/swaggo/files"
+ ginSwagger "github.com/swaggo/gin-swagger"
+
+ "github.com/projecteru2/vmihub/assets"
+ "github.com/projecteru2/vmihub/internal/api/image"
+ "github.com/projecteru2/vmihub/internal/api/user"
+ "github.com/projecteru2/vmihub/internal/middlewares"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/internal/utils/redissession"
+ "golang.org/x/text/language"
+ "gopkg.in/yaml.v3"
+)
+
+// SetupRouter initialize routing information
+func SetupRouter() (*gin.Engine, error) {
+ r := gin.New()
+ redisCli := utils.GetRedisConn()
+ sessStor, err := redissession.NewStore(context.TODO(), redisCli)
+ if err != nil {
+ return nil, err
+ }
+ //// Set the maximum session lifetime
+ // sessStor.Options(sessions.Options{
+ // MaxAge: 7200, // expires after 2 hours
+ // HttpOnly: true,
+ // Secure: false, // should be true when served over HTTPS
+ // })
+ r.Use(sessions.Sessions("mysession", sessStor))
+
+ r.Use(ginI18n.Localize(ginI18n.WithBundle(&ginI18n.BundleCfg{
+ RootPath: "./i18n/localize",
+ AcceptLanguage: []language.Tag{language.Chinese, language.English},
+ DefaultLanguage: language.Chinese,
+ UnmarshalFunc: yaml.Unmarshal,
+ FormatBundleFile: "yaml",
+ Loader: &ginI18n.EmbedLoader{FS: assets.Assets},
+ })))
+
+ r.Use(middlewares.Cors())
+ r.Use(middlewares.Logger("vmihub"))
+
+ r.GET("/healthz", func(c *gin.Context) {
+ c.String(http.StatusOK, ginI18n.MustGetMessage(c, "healthy"))
+ })
+
+ r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
+
+ basePath := "/api/v1"
+ apiGroup := r.Group(basePath, middlewares.Authenticate())
+
+ image.SetupRouter(apiGroup)
+ user.SetupRouter(basePath, r)
+ return r, nil
+}
diff --git a/internal/api/router_test.go b/internal/api/router_test.go
new file mode 100644
index 0000000..0e483f6
--- /dev/null
+++ b/internal/api/router_test.go
@@ -0,0 +1,33 @@
+package api
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/projecteru2/vmihub/internal/testutils"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHealthz(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer cancel()
+ err := testutils.Prepare(ctx, t)
+ assert.Nil(t, err)
+ router, err := SetupRouter()
+ require.NoError(t, err)
+
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/healthz", nil)
+ require.NoError(t, err)
+
+ req.Header.Set("Accept-Language", "en")
+
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 200, w.Code)
+ assert.Equal(t, "Healthy", w.Body.String())
+}
diff --git a/internal/api/user/user.go b/internal/api/user/user.go
new file mode 100644
index 0000000..19bc93c
--- /dev/null
+++ b/internal/api/user/user.go
@@ -0,0 +1,585 @@
+package user
+
+import (
+ "errors"
+ "net/http"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/dgrijalva/jwt-go"
+ "github.com/gin-gonic/gin"
+ "github.com/gin-gonic/gin/binding"
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/vmihub/config"
+ "github.com/projecteru2/vmihub/internal/common"
+ "github.com/projecteru2/vmihub/internal/middlewares"
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+ "github.com/projecteru2/vmihub/pkg/types"
+)
+
+const (
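+ // A refresh token's Subject is refreshPrefix plus the access token it refreshes (see GetUserToken).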
+ refreshPrefix = "refresh__"
+)
+
+func SetupRouter(basePath string, r *gin.Engine) {
+ userGroup := r.Group(path.Join(basePath, "/user"))
+
+ // userGroup.Use(AuthRequired)
+
+ // Login user
+ userGroup.POST("/login", LoginUser)
+ // Logout user
+ userGroup.POST("/logout", LogoutUser)
+ // Get token
+ userGroup.POST("/token", GetUserToken)
+ // Refresh token
+ userGroup.POST("/refreshToken", RefreshToken)
+ // Get user information
+ userGroup.GET("/info", middlewares.Authenticate(), GetUserInfo)
+ // Update user
+ userGroup.POST("/info", middlewares.Authenticate(), UpdateUser)
+
+ // change password
+ userGroup.POST("/changePwd", middlewares.Authenticate(), changePwd)
+ // reset password
+ userGroup.POST("/resetPwd", resetPwd)
+ // Create private token
+ userGroup.POST("/privateToken", middlewares.Authenticate(), CreatePrivateToken)
+ // List private tokens
+ userGroup.GET("/privateTokens", middlewares.Authenticate(), ListPrivateToken)
+ // Delete private token
+ userGroup.DELETE("/privateToken", middlewares.Authenticate(), DeletePrivateToken)
+}
+
+// LoginUser login the user
+//
+// LoginUser @Summary login user
+// @Description LoginUser login user
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param body body types.LoginRequest true "用户结构体"
+// @success 200 {object} types.JSONResult{} "desc"
+// @Router /user/login [post]
+func LoginUser(c *gin.Context) {
+ logger := log.WithFunc("LoginUser")
+ // check request params
+ var req types.LoginRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ if err := req.Check(); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+
+ user, err := models.CheckAndGetUser(c, req.Username, req.Password)
+ // query user
+ if err != nil {
+ if errors.Is(err, terrors.ErrInvalidUserPass) {
+ c.JSON(http.StatusUnauthorized, gin.H{"error": err.Error()})
+ } else {
+ logger.Error(c, err, "failed query user from db")
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ }
+ return
+ }
+ if user == nil {
+ c.AbortWithStatusJSON(http.StatusNotFound, gin.H{"error": "user not found"})
+ return
+ }
+ if err := common.SaveUserSession(c, user); err != nil {
+ logger.Errorf(c, err, "failed to save user session")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal server error"})
+ return
+ }
+
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "login successfully",
+ })
+}
+
+// LogoutUser logout the user
+//
+// LogoutUser @Summary logout user
+// @Description LogoutUser logout user
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @success 200 {object} types.JSONResult{} "desc"
+// @Router /user/logout [post]
+func LogoutUser(c *gin.Context) {
+ _, exists := common.LoginUser(c)
+ if !exists {
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "logout successfully",
+ })
+ return
+ }
+ _ = common.DeleteUserSession(c)
+
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "logout successfully",
+ })
+}
+
+// change user password
+//
+// @Summary change user password
+// @Description changePwd changes the user password
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param body body types.ChangeUserPwdRequest true "修改密码"
+// @success 200 {object} types.JSONResult{data=types.UserInfoResp} "desc"
+// @Router /user/changePwd [post]
+func changePwd(c *gin.Context) {
+ value, exists := c.Get("user")
+ if !exists {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "please login or authenticate first"})
+ return
+ }
+ user := value.(*models.User) //nolint
+ // parse request parameters
+ var req types.ChangeUserPwdRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.JSON(400, gin.H{"error": err.Error()})
+ return
+ }
+
+ // change the password
+ err := user.UpdatePwd(req.NewPassword)
+ if err != nil {
+ log.WithFunc("changePwd").Error(c, err, "Failed to change user password")
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to change user password"})
+ return
+ }
+
+ _ = common.DeleteUserSession(c)
+
+ resp := types.UserInfoResp{
+ ID: user.ID,
+ Username: user.Username,
+ Email: user.Email,
+ Nickname: user.Nickname,
+ }
+ c.JSON(http.StatusCreated, gin.H{
+ "msg": "changed successfully",
+ "data": resp,
+ })
+}
+
+// reset user password
+//
+// @Summary reset user password
+// @Description resetPwd resets the user password
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param body body types.ResetUserPwdRequest true "重置密码"
+// @success 200 {object} types.JSONResult{data=types.UserInfoResp} "desc"
+// @Router /user/resetPwd [post]
+func resetPwd(c *gin.Context) {
+ logger := log.WithFunc("resetPwd")
+ // parse request parameters
+ var req types.ResetUserPwdRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ if err := req.Check(); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ user, err := models.GetUser(c, req.Phone)
+ if err != nil {
+ logger.Errorf(c, err, "failed to query user")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+ if user == nil {
+ c.AbortWithStatusJSON(http.StatusNotFound, gin.H{"error": "user not found"})
+ return
+ }
+ // update the password
+ if err := user.UpdatePwd(req.Password); err != nil {
+ logger.Error(c, err, "Failed to change user password")
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to change user password"})
+ return
+ }
+
+ resp := convUserResp(user)
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "reset password successfully",
+ "data": resp,
+ })
+}
+
+// GetUserToken get user token
+//
+// GetUserToken @Summary get token
+// @Description GetUserToken get user token
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param body body types.LoginRequest true "用户结构体"
+// @success 200 {object} types.JSONResult{data=types.TokenResponse} "desc"
+// @Router /user/token [post]
+func GetUserToken(c *gin.Context) {
+ logger := log.WithFunc("GetUserToken")
+ // check request params
+ var req types.LoginRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+
+ if err := req.Check(); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ user, err := models.CheckAndGetUser(c, req.Username, req.Password)
+ // query user
+ if err != nil {
+ if errors.Is(err, terrors.ErrInvalidUserPass) {
+ c.JSON(http.StatusUnauthorized, gin.H{"error": err.Error()})
+ } else {
+ logger.Error(c, err, "failed to query user from db")
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ }
+ return
+ }
+ if user == nil {
+ c.AbortWithStatusJSON(http.StatusNotFound, gin.H{"error": "user not found"})
+ return
+ }
+ j := common.NewJWT(config.GetCfg().JWT.SigningKey)
+
+ // generate access token
+ accessClaims := models.CustomClaims{
+ ID: user.ID,
+ UserName: user.Username,
+ StandardClaims: jwt.StandardClaims{
+ NotBefore: time.Now().Unix(), // token becomes valid immediately
+ ExpiresAt: time.Now().Unix() + 60*60*2, // expires after 2 hours
+ Issuer: "eru",
+ Subject: "access",
+ },
+ }
+ accessTokenString, err := j.CreateToken(accessClaims)
+
+ if err != nil {
+ logger.Error(c, err, "failed to sign token")
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to sign token"})
+ return
+ }
+
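+ // the refresh token's Subject embeds the access token (refreshPrefix + access token),
+ // so RefreshToken can later verify that the two tokens were issued as a pair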
+ // generate refresh token
+ refreshClaims := models.CustomClaims{
+ ID: user.ID,
+ UserName: user.Username,
+ StandardClaims: jwt.StandardClaims{
+ NotBefore: time.Now().Unix(), // token becomes valid immediately
+ ExpiresAt: time.Now().Unix() + 60*60*24, // expires after 24 hours
+ Issuer: "eru",
+ Subject: refreshPrefix + accessTokenString,
+ },
+ }
+ refreshTokenString, err := j.CreateToken(refreshClaims)
+ if err != nil {
+ logger.Error(c, err, "failed to sign token")
+ c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to sign token"})
+ return
+ }
+ if err := common.SaveUserSession(c, user); err != nil {
+ logger.Warnf(c, "failed to save user session: %s", err)
+ }
+
+ c.JSON(http.StatusOK, gin.H{
+ "data": types.TokenResponse{AccessToken: accessTokenString, RefreshToken: refreshTokenString},
+ "msg": "Success",
+ })
+}
+
+// RefreshToken refresh token
+//
+// @Summary refresh token
+// @Description RefreshToken refresh token
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param body body types.RefreshRequest true "刷新Token结构体"
+// @success 200 {object} types.JSONResult{data=types.TokenResponse} "desc"
+// @Router /user/refreshToken [post]
+func RefreshToken(c *gin.Context) {
+ // check request params
+ var req types.RefreshRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+
+ j := common.NewJWT(config.GetCfg().JWT.SigningKey)
+ token, err := j.ParseToken(req.RefreshToken)
+ if err != nil {
+ if errors.Is(err, terrors.ErrTokenExpired) {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "refresh token has expired",
+ })
+ return
+ }
+
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "please login",
+ })
+ return
+ }
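+ // refresh tokens issued by GetUserToken carry refreshPrefix + access token as their Subject,
+ // so the two checks below reject tokens that were not issued together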
+ subject := token.Subject
+ if !strings.HasPrefix(subject, refreshPrefix) {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "invalid refresh token",
+ })
+ return
+ }
+ if subject != refreshPrefix+req.AccessToken {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "invalid refresh token or access token",
+ })
+ return
+ }
+
+ // generate access token
+ accessClaims := models.CustomClaims{
+ ID: token.ID,
+ UserName: token.UserName,
+ StandardClaims: jwt.StandardClaims{
+ NotBefore: time.Now().Unix(), // token becomes valid immediately
+ ExpiresAt: time.Now().Unix() + 60*60*2, // expires after 2 hours
+ Issuer: "eru",
+ Subject: "access",
+ },
+ }
+ accessTokenString, err := j.CreateToken(accessClaims)
+
+ if err != nil {
+ log.WithFunc("RefreshToken").Error(c, err, "failed to sign token")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "failed to sign token"})
+ return
+ }
+
+ // generate refresh token
+ refreshClaims := models.CustomClaims{
+ ID: token.ID,
+ UserName: token.UserName,
+ StandardClaims: jwt.StandardClaims{
+ NotBefore: time.Now().Unix(), // token becomes valid immediately
+ ExpiresAt: time.Now().Unix() + 60*60*24, // expires after 24 hours
+ Issuer: "eru",
+ Subject: refreshPrefix + accessTokenString,
+ },
+ }
+ refreshTokenString, err := j.CreateToken(refreshClaims)
+ if err != nil {
+ log.WithFunc("RefreshToken").Error(c, err, "failed to sign token")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "failed to sign token"})
+ return
+ }
+
+ c.JSON(http.StatusOK, gin.H{
+ "data": types.TokenResponse{AccessToken: accessTokenString, RefreshToken: refreshTokenString},
+ })
+}
+
+// GetUserInfo get user info
+//
+// @Summary get user info
+// @Description GetUserInfo get user info
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @success 200 {object} types.JSONResult{data=types.UserInfoResp} "desc"
+// @Router /user/info [get]
+func GetUserInfo(c *gin.Context) {
+ user, exists := common.LoginUser(c)
+ if !exists {
+ c.JSON(http.StatusUnauthorized, gin.H{
+ "error": "please login",
+ })
+ return
+ }
+ resp := convUserResp(user)
+ c.JSON(http.StatusOK, gin.H{
+ "data": resp,
+ "msg": "Success",
+ })
+}
+
+// update user information
+//
+// @Summary update user information
+// @Description UpdateUser updates user information
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param body body types.UpdateUserRequest true "用户信息"
+// @success 200 {object} types.JSONResult{data=types.UserInfoResp} "desc"
+// @Router /user/info [post]
+func UpdateUser(c *gin.Context) {
+ logger := log.WithFunc("UpdateUser")
+ // parse request parameters
+ var req types.UpdateUserRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ user, exists := common.LoginUser(c)
+ if !exists {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "please login"})
+ return
+ }
+ if req.Email != "" {
+ user.Email = req.Email
+ }
+ if req.Nickname != "" {
+ user.Nickname = req.Nickname
+ }
+ if err := user.Update(nil); err != nil {
+ logger.Errorf(c, err, "failed to update user")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+ resp := convUserResp(user)
+ if err := common.SaveUserSession(c, user); err != nil {
+ logger.Warnf(c, "failed to save user session: %s", err)
+ }
+
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "update user successfully",
+ "data": resp,
+ })
+}
+
+// CreatePrivateToken creates a private token for the current user
+//
+// @Summary create private token
+// @Description CreatePrivateToken creates a private token for the current user
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param body body types.PrivateTokenRequest true "用户结构体"
+// @success 200 {object} types.JSONResult{data=models.PrivateToken} "desc"
+// @Router /user/privateToken [post]
+func CreatePrivateToken(c *gin.Context) {
+ var req types.PrivateTokenRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
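+ // a missing or already-past expiry defaults to one year from now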
+ if req.ExpiredAt.Before(time.Now()) {
+ req.ExpiredAt = time.Now().AddDate(1, 0, 0)
+ }
+ value, exists := c.Get("user")
+ if !exists {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "please login"})
+ return
+ }
+ user := value.(*models.User) //nolint
+
+ token, err := utils.GetUniqueStr()
+ if err != nil {
+ log.WithFunc("CreatePrivateToken").Error(c, err, "failed to get token")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "failed to get token"})
+ return
+ }
+ tokenObj := models.PrivateToken{
+ Name: req.Name,
+ UserID: user.ID,
+ Token: token,
+ ExpiredAt: req.ExpiredAt,
+ }
+ if err := tokenObj.Save(nil); err != nil {
+ log.WithFunc("CreatePrivateToken").Error(c, err, "failed to save token")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "failed to save token"})
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "data": tokenObj,
+ })
+}
+
+// ListPrivateToken lists all private tokens of the current user
+//
+// @Summary list private token
+// @Description ListPrivateToken list all private tokens of current user
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @success 200 {object} types.JSONResult{data=[]models.PrivateToken} "desc"
+// @Router /user/privateTokens [GET]
+func ListPrivateToken(c *gin.Context) {
+ value, exists := c.Get("user")
+ if !exists {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "please login"})
+ return
+ }
+ user := value.(*models.User) //nolint
+
+ tokens, err := models.QueryPrivateTokensByUser(c, user.ID)
+ if err != nil {
+ log.WithFunc("ListPrivateToken").Error(c, err, "failed to query tokens")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "failed to query tokens"})
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "data": tokens,
+ })
+}
+
+// DeletePrivateToken deletes a private token of the current user
+//
+// @Summary delete private token
+// @Description DeletePrivateToken deletes a private token of the current user
+// @Tags 用户管理
+// @Accept json
+// @Produce json
+// @Param Authorization header string true "token"
+// @Param body body types.PrivateTokenDeleteRequest true "用户结构体"
+// @success 200 {object} types.JSONResult{msg=string} "desc"
+// @Router /user/privateToken [delete]
+func DeletePrivateToken(c *gin.Context) {
+ var req types.PrivateTokenDeleteRequest
+ if err := c.ShouldBindWith(&req, binding.JSON); err != nil {
+ c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+ return
+ }
+ value, exists := c.Get("user")
+ if !exists {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "please login"})
+ return
+ }
+ user := value.(*models.User) //nolint
+ t, err := models.GetPrivateTokenByUserAndName(user.ID, req.Name)
+ if err != nil {
+ log.WithFunc("DeletePrivateToken").Error(c, err, "failed to get token")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+ if t == nil {
+ c.AbortWithStatusJSON(http.StatusNotFound, gin.H{"error": "token not found"})
+ return
+ }
+ if err := t.Delete(nil); err != nil {
+ log.WithFunc("DeletePrivateToken").Error(c, err, "failed to delete token")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "msg": "success",
+ })
+}
diff --git a/internal/api/user/user_test.go b/internal/api/user/user_test.go
new file mode 100644
index 0000000..426525f
--- /dev/null
+++ b/internal/api/user/user_test.go
@@ -0,0 +1,80 @@
+package user
+
+import (
+ "context"
+ "database/sql/driver"
+ "testing"
+ "time"
+
+ "github.com/gin-gonic/gin"
+ "github.com/projecteru2/vmihub/internal/testutils"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/suite"
+ "golang.org/x/crypto/bcrypt"
+)
+
+type userTestSuite struct {
+ suite.Suite
+ r *gin.Engine
+}
+
+func (suite *userTestSuite) SetupTest() {
+ t := suite.T()
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer cancel()
+ err := testutils.Prepare(ctx, t)
+ assert.Nil(t, err)
+ r, err := testutils.PrepareGinEngine()
+ assert.Nil(t, err)
+
+ SetupRouter("/api/v1", r)
+ suite.r = r
+}
+
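+// passwdMatcher lets sqlmock compare a bcrypt-hashed SQL argument against the expected plaintext password.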
+type passwdMatcher struct {
+ passwd string
+}
+
+// Match satisfies sqlmock.Argument interface
+func (a passwdMatcher) Match(v driver.Value) bool {
+ ss, ok := v.(string)
+ if !ok {
+ return false
+ }
+ if err := bcrypt.CompareHashAndPassword([]byte(ss), []byte(a.passwd)); err != nil {
+ return false
+ }
+ return true
+}
+
+// func (suite *userTestSuite) TestRegistr() {
+// obj := types.RegisterRequest{
+// Username: "user11",
+// Password: "pass11",
+// Email: "haha@qq.com",
+// Phone: "12345678901",
+// }
+// sqlStr := "SELECT * FROM user WHERE phone = ?"
+// models.Mock.ExpectQuery(sqlStr).
+// WithArgs(obj.Phone).
+// WillReturnError(sql.ErrNoRows)
+// sqlStr = "INSERT INTO user (username, phone, password, email, namespace, nickname) VALUES (?, ?, ?, ?, ?, ?)"
+
+// models.Mock.ExpectExec(sqlStr).
+// WithArgs(obj.Username, obj.Phone, passwdMatcher{obj.Password}, obj.Email, obj.Username, obj.Username).
+// WillReturnResult(sqlmock.NewResult(1234, 1))
+// models.Mock.ExpectCommit()
+// bs, err := json.Marshal(obj)
+// suite.Nil(err)
+// w := httptest.NewRecorder()
+// req, _ := http.NewRequest("POST", "/api/v1/user/register", bytes.NewBuffer(bs))
+// req.Header.Set("Content-Type", "application/json")
+// suite.r.ServeHTTP(w, req)
+
+// // fmt.Printf("+++++++++ %s\n", w.Body.String())
+// suite.Equal(http.StatusCreated, w.Code)
+// }
+
+func TestUserTestSuite(t *testing.T) {
+ suite.Run(t, new(userTestSuite))
+}
diff --git a/internal/api/user/utils.go b/internal/api/user/utils.go
new file mode 100644
index 0000000..01d8e8d
--- /dev/null
+++ b/internal/api/user/utils.go
@@ -0,0 +1,16 @@
+package user
+
+import (
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/pkg/types"
+)
+
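+// convUserResp converts a models.User into the public API response shape, leaving out sensitive fields.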
+func convUserResp(u *models.User) *types.UserInfoResp {
+ return &types.UserInfoResp{
+ ID: u.ID,
+ Username: u.Username,
+ IsAdmin: u.Admin,
+ Email: u.Email,
+ Nickname: u.Nickname,
+ }
+}
diff --git a/internal/common/image.go b/internal/common/image.go
new file mode 100644
index 0000000..394ca4f
--- /dev/null
+++ b/internal/common/image.go
@@ -0,0 +1,49 @@
+package common
+
+import (
+ "net/http"
+
+ "github.com/gin-gonic/gin"
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+)
+
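+// GetRepoImageForUpload resolves the repository and image referenced by an upload request
+// and enforces that only admins or the owning user may upload. On failure it has already
+// written the HTTP error response and returns a non-nil error, so callers simply return.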
+func GetRepoImageForUpload(c *gin.Context, imgUser, name, tag string) (repo *models.Repository, img *models.Image, err error) {
+ logger := log.WithFunc("GetRepoImageForUpload")
+ curUser, exists := LoginUser(c)
+ if !exists {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "please login",
+ })
+ err = terrors.ErrPlaceholder
+ return
+ }
+ if !curUser.Admin && curUser.Username != imgUser {
+ c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
+ "error": "you don't have permission to upload to this image",
+ })
+ err = terrors.ErrPlaceholder
+ return
+ }
+ repo, err = models.QueryRepo(c, imgUser, name)
+ if err != nil {
+ logger.Errorf(c, err, "can't query image: %s/%s", imgUser, name)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+ if repo == nil {
+ return
+ }
+ img, err = repo.GetImage(c, tag)
+ if err != nil {
+ logger.Errorf(c, err, "can't query image tag: %s/%s:%s", imgUser, name, tag)
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error",
+ })
+ return
+ }
+ return
+}
diff --git a/internal/common/jwt.go b/internal/common/jwt.go
new file mode 100644
index 0000000..b515868
--- /dev/null
+++ b/internal/common/jwt.go
@@ -0,0 +1,51 @@
+package common
+
+import (
+ "github.com/dgrijalva/jwt-go"
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+)
+
+type JWT struct {
+ SigningKey []byte
+}
+
+func NewJWT(key string) *JWT {
+ return &JWT{
+ []byte(key),
+ }
+}
+
+// CreateToken create token
+func (j *JWT) CreateToken(claims models.CustomClaims) (string, error) {
+ token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+ return token.SignedString(j.SigningKey)
+}
+
+// ParseToken parses and validates a token string, mapping jwt-go validation failures to terrors sentinel errors
+func (j *JWT) ParseToken(tokenString string) (*models.CustomClaims, error) {
+ token, err := jwt.ParseWithClaims(tokenString, &models.CustomClaims{}, func(_ *jwt.Token) (any, error) {
+ return j.SigningKey, nil
+ })
+ if err != nil { //nolint:nolintlint,nestif
+ if ve, ok := err.(*jwt.ValidationError); ok {
+ switch {
+ case ve.Errors&jwt.ValidationErrorMalformed != 0:
+ return nil, terrors.ErrTokenMalformed
+ case ve.Errors&jwt.ValidationErrorExpired != 0:
+ return nil, terrors.ErrTokenExpired
+ case ve.Errors&jwt.ValidationErrorNotValidYet != 0:
+ return nil, terrors.ErrTokenNotValidYet
+ default:
+ return nil, terrors.ErrTokenInvalid
+ }
+ }
+ }
+ if token != nil {
+ if claims, ok := token.Claims.(*models.CustomClaims); ok && token.Valid {
+ return claims, nil
+ }
+ return nil, terrors.ErrTokenInvalid
+ }
+ return nil, terrors.ErrTokenInvalid
+}
diff --git a/internal/common/user.go b/internal/common/user.go
new file mode 100644
index 0000000..e21df0b
--- /dev/null
+++ b/internal/common/user.go
@@ -0,0 +1,174 @@
+package common
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/gin-contrib/sessions"
+ "github.com/gin-gonic/gin"
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/vmihub/config"
+ "github.com/projecteru2/vmihub/internal/models"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+)
+
+const (
+ userIDSessionKey = "userID"
+)
+
+func SaveUserSession(c *gin.Context, u *models.User) error {
+ // set session
+ session := sessions.Default(c)
+ session.Set(userIDSessionKey, u.ID)
+ return session.Save()
+}
+
+func DeleteUserSession(c *gin.Context) error {
+ // set session
+ session := sessions.Default(c)
+ session.Delete(userIDSessionKey)
+ return session.Save()
+}
+
+func AuthWithSession(c *gin.Context) (err error) {
+ sess := sessions.Default(c)
+ userID := sess.Get(userIDSessionKey)
+ var user *models.User
+ if userID != nil {
+ if user, err = models.GetUserByID(c, userID.(int64)); err != nil {
+ return err
+ }
+ }
+ if user == nil {
+ return errors.Newf("can't find user %d", userID)
+ }
+ log.WithFunc("AuthWithSession").Debugf(c, "session authentication succeeded for user %s", user.Username)
+
+ attachUserToCtx(c, user)
+ return nil
+}
+
+func AuthWithBasic(c *gin.Context, bs64Token string) error {
+ token, err := base64.StdEncoding.DecodeString(bs64Token)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": fmt.Sprintf("invalid basic token %s", err),
+ })
+ return terrors.ErrPlaceholder
+ }
+ // split on the first ':' only, since the password itself may contain colons
+ parts := strings.SplitN(string(token), ":", 2)
+ if len(parts) != 2 {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "invalid username or password",
+ })
+ return terrors.ErrPlaceholder
+ }
+ username, password := parts[0], parts[1]
+ user, err := models.CheckAndGetUser(c, username, password)
+ if err != nil {
+ if errors.Is(err, terrors.ErrInvalidUserPass) {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": err.Error(),
+ })
+ } else {
+ log.WithFunc("AuthWithBasic").Error(c, err, "failed to query user from db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ }
+ return terrors.ErrPlaceholder
+ }
+ attachUserToCtx(c, user)
+ return nil
+}
+
+func AuthWithPrivateToken(c *gin.Context, token string) error {
+ t, err := models.GetPrivateToken(token)
+ if err != nil {
+ log.WithFunc("AuthWithPrivateToken").Error(c, err, "failed to query private token from db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return err
+ }
+ if t == nil {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "invalid private token",
+ })
+ return terrors.ErrPlaceholder
+ }
+ if t.ExpiredAt.Before(time.Now()) {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "private token expired",
+ })
+ return terrors.ErrPlaceholder
+ }
+ _ = t.UpdateLastUsed()
+
+ // set user login info
+ user, err := t.GetUser()
+ if err != nil {
+ log.WithFunc("AuthWithPrivateToken").Error(c, err, "failed to query user from db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return terrors.ErrPlaceholder
+ }
+ if user == nil {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "invalid private token",
+ })
+ return terrors.ErrPlaceholder
+ }
+ attachUserToCtx(c, user)
+ return nil
+}
+
+func AuthWithJWT(c *gin.Context, token string) error {
+ j := NewJWT(config.GetCfg().JWT.SigningKey)
+ // parseToken parse token contain info
+ claims, err := j.ParseToken(token)
+ if err != nil {
+ if errors.Is(err, terrors.ErrTokenExpired) {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "authorization has expired",
+ })
+ return terrors.ErrPlaceholder
+ }
+
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "please login",
+ })
+ return terrors.ErrPlaceholder
+ }
+
+ // set user login info
+ user, err := models.GetUser(c, claims.UserName)
+ if err != nil {
+ log.WithFunc("AuthWithJWT").Error(c, err, "failed to query user from db")
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
+ "error": "internal error, please try again",
+ })
+ return terrors.ErrPlaceholder
+ }
+ if user == nil {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "invalid token",
+ })
+ return terrors.ErrPlaceholder
+ }
+ c.Set("claims", claims)
+ attachUserToCtx(c, user)
+ return nil
+}
+
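+// attachUserToCtx stores the authenticated user on the gin context under the "user",
+// "userid" and "username" keys consumed by handlers and the LoginRequired middleware.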
+func attachUserToCtx(c *gin.Context, u *models.User) {
+ c.Set("userid", u.ID)
+ c.Set("username", u.Username)
+ c.Set("user", u)
+}
diff --git a/internal/common/utils.go b/internal/common/utils.go
new file mode 100644
index 0000000..1073674
--- /dev/null
+++ b/internal/common/utils.go
@@ -0,0 +1,14 @@
+package common
+
+import (
+ "github.com/gin-gonic/gin"
+ "github.com/projecteru2/vmihub/internal/models"
+)
+
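+// LoginUser returns the user previously attached to the gin context by the authentication middleware, if any.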
+func LoginUser(c *gin.Context) (user *models.User, exists bool) {
+ value, exists := c.Get("user")
+ if exists {
+ user = value.(*models.User) //nolint
+ }
+ return
+}
diff --git a/internal/middlewares/authenticate.go b/internal/middlewares/authenticate.go
new file mode 100644
index 0000000..7f67d6e
--- /dev/null
+++ b/internal/middlewares/authenticate.go
@@ -0,0 +1,70 @@
+package middlewares
+
+import (
+ "fmt"
+ "net/http" //nolint:nolintlint,goimports
+ "strings"
+
+ "github.com/gin-gonic/gin"
+
+ "github.com/projecteru2/vmihub/internal/common"
+)
+
+func getPrivateToken(c *gin.Context) string {
+ privateToken := c.Request.Header.Get("PRIVATE-TOKEN")
+ if privateToken != "" {
+ return privateToken
+ }
+ return c.Query("private_token")
+}
+
+// Authenticate resolves the requesting user from a session, a private token, or the Authorization header (Bearer JWT or Basic credentials)
+func Authenticate() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ // first check session
+ if err := common.AuthWithSession(c); err == nil {
+ c.Next()
+ return
+ }
+ // check private token
+ privateToken := getPrivateToken(c)
+ if privateToken != "" {
+ err := common.AuthWithPrivateToken(c, privateToken)
+ if err != nil {
+ return
+ }
+ c.Next()
+ return
+ }
+ // check jwt and basic
+ token := c.Request.Header.Get("Authorization")
+ if token == "" {
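+ // no credentials supplied: continue anonymously; LoginRequired will reject protected routes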
+ c.Next()
+ return
+ }
+
+ var err error
+ parts := strings.Split(token, " ")
+ if len(parts) != 2 {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": fmt.Sprintf("invalid token %s", token),
+ })
+ return
+ }
+ switch parts[0] {
+ case "Bearer":
+ err = common.AuthWithJWT(c, parts[1])
+ case "Basic":
+ err = common.AuthWithBasic(c, parts[1])
+ default:
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": fmt.Sprintf("invalid token %s", token),
+ })
+ return
+ }
+ if err != nil {
+ return
+ }
+ c.Next()
+ }
+}
diff --git a/internal/middlewares/cors.go b/internal/middlewares/cors.go
new file mode 100644
index 0000000..0dd399a
--- /dev/null
+++ b/internal/middlewares/cors.go
@@ -0,0 +1,25 @@
+package middlewares
+
+import (
+ "time"
+
+ "github.com/gin-contrib/cors"
+ "github.com/gin-gonic/gin"
+)
+
+func Cors() gin.HandlerFunc {
+ return cors.New(cors.Config{
+ AllowOrigins: []string{"*"},
+ AllowMethods: []string{"GET", "POST", "DELETE", "OPTIONS"},
+ AllowHeaders: []string{"Origin", "X-Requested-With", "Content-Type", "Accept", "X-CSRF-TOKEN", "Authorization"},
+ ExposeHeaders: []string{"Content-Length", "Content-Disposition"},
+ AllowCredentials: true,
+ // AllowOriginFunc: func(origin string) bool {
+ // allowOrigins := map[string]bool {
+ // "http://localhost:3000": true,
+ // }
+ // _, ok := allowOrigins[origin]
+ // return ok
+ // },
+ MaxAge: 1 * time.Hour,
+ })
+}
diff --git a/internal/middlewares/gin_zerolog.go b/internal/middlewares/gin_zerolog.go
new file mode 100644
index 0000000..fcaeb9b
--- /dev/null
+++ b/internal/middlewares/gin_zerolog.go
@@ -0,0 +1,102 @@
+package middlewares
+
+import (
+ "time"
+
+ "github.com/gin-gonic/gin"
+ "github.com/projecteru2/core/log"
+)
+
+type ginHands struct {
+ SerName string
+ Path string
+ Latency time.Duration
+ Method string
+ StatusCode int
+ ClientIP string
+ MsgStr string
+}
+
+func ErrorLogger() gin.HandlerFunc {
+ return ErrorLoggerT(gin.ErrorTypeAny)
+}
+
+func ErrorLoggerT(typ gin.ErrorType) gin.HandlerFunc {
+ return func(c *gin.Context) {
+ c.Next()
+
+ if !c.Writer.Written() {
+ json := c.Errors.ByType(typ).JSON()
+ if json != nil {
+ c.JSON(-1, json)
+ }
+ }
+ }
+}
+
+func Logger(serName string) gin.HandlerFunc {
+ return func(c *gin.Context) {
+ t := time.Now()
+ // before request
+ path := c.Request.URL.Path
+ raw := c.Request.URL.RawQuery
+ c.Next()
+ // after request
+ // latency := time.Since(t)
+ // clientIP := c.ClientIP()
+ // method := c.Request.Method
+ // statusCode := c.Writer.Status()
+ if raw != "" {
+ path = path + "?" + raw
+ }
+ msg := c.Errors.String()
+ if msg == "" {
+ msg = "Request"
+ }
+ cData := &ginHands{
+ SerName: serName,
+ Path: path,
+ Latency: time.Since(t),
+ Method: c.Request.Method,
+ StatusCode: c.Writer.Status(),
+ ClientIP: c.ClientIP(),
+ MsgStr: msg,
+ }
+
+ logSwitch(cData)
+ }
+}
+
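+// logSwitch chooses the log level from the response status: 4xx logs a warning,
+// 5xx logs an error, and everything else logs at info level.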
+func logSwitch(data *ginHands) {
+ switch {
+ case data.StatusCode >= 400 && data.StatusCode < 500:
+ log.GetGlobalLogger().Warn().
+ Str("ser_name", data.SerName).
+ Str("method", data.Method).
+ Str("path", data.Path).
+ Dur("resp_time", data.Latency).
+ Int("status", data.StatusCode).
+ Str("client_ip", data.ClientIP).
+ Msg(data.MsgStr)
+
+ case data.StatusCode >= 500: //nolint
+ log.GetGlobalLogger().Error().
+ Str("ser_name", data.SerName).
+ Str("method", data.Method).
+ Str("path", data.Path).
+ Dur("resp_time", data.Latency).
+ Int("status", data.StatusCode).
+ Str("client_ip", data.ClientIP).
+ Msg(data.MsgStr)
+
+ default:
+ log.GetGlobalLogger().Info().
+ Str("ser_name", data.SerName).
+ Str("method", data.Method).
+ Str("path", data.Path).
+ Dur("resp_time", data.Latency).
+ Int("status", data.StatusCode).
+ Str("client_ip", data.ClientIP).
+ Msg(data.MsgStr)
+ }
+}
diff --git a/internal/middlewares/init.go b/internal/middlewares/init.go
new file mode 100644
index 0000000..ea553b7
--- /dev/null
+++ b/internal/middlewares/init.go
@@ -0,0 +1 @@
+package middlewares
diff --git a/internal/middlewares/loginRequired.go b/internal/middlewares/loginRequired.go
new file mode 100644
index 0000000..1f738c1
--- /dev/null
+++ b/internal/middlewares/loginRequired.go
@@ -0,0 +1,22 @@
+package middlewares
+
+import (
+ "net/http"
+
+ "github.com/gin-gonic/gin"
+)
+
+// LoginRequired middleware
+func LoginRequired() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ _, exists := c.Get("user")
+ if !exists {
+ c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
+ "error": "not logged in",
+ })
+ return
+ }
+ c.Next()
+ }
+}
diff --git a/internal/models/const.go b/internal/models/const.go
new file mode 100644
index 0000000..71d20d4
--- /dev/null
+++ b/internal/models/const.go
@@ -0,0 +1,9 @@
+package models
+
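+// Redis cache key templates: repo entries are keyed by username/name, image entries by
+// username/name/tag, and user entries by username or numeric id.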
+const (
+ redistRepoKey = "/vmihub/repo/%s/%s"
+ redisImageKey = "/vmihub/image/%s/%s/%s"
+
+ redisUserKey = "/vmihub/user/%s"
+ redisUserIDKey = "/vmihub/userId/%d"
+)
diff --git a/internal/models/image.go b/internal/models/image.go
new file mode 100644
index 0000000..5a524ed
--- /dev/null
+++ b/internal/models/image.go
@@ -0,0 +1,632 @@
+package models
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/duke-git/lancet/strutil"
+ "github.com/jmoiron/sqlx"
+ "github.com/pkg/errors"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/types"
+ pkgutils "github.com/projecteru2/vmihub/pkg/utils"
+ "github.com/redis/go-redis/v9"
+ "github.com/samber/lo"
+)
+
+const (
+ ImageFormatQcow2 = "qcow2"
+ ImageFormatRaw = "raw"
+ ImageFormatRBD = "rbd"
+)
+
+type Repository struct {
+ ID int64 `db:"id" json:"id"`
+ Username string `db:"username" json:"username" description:"image's username"`
+ Name string `db:"name" json:"name" description:"image name"`
+ Private bool `db:"private" json:"private" description:"image is private"`
+ CreatedAt time.Time `db:"created_at" json:"createdAt" description:"image create time"`
+ UpdatedAt time.Time `db:"updated_at" json:"updatedAt" description:"image update time"`
+ Images []Image `db:"-" json:"-"`
+}
+
+func (*Repository) TableName() string {
+ return "repository"
+}
+
+func (repo *Repository) ColumnNames() string {
+ names := GetColumnNames(repo)
+ return strings.Join(names, ", ")
+}
+
+type Image struct {
+ ID int64 `db:"id" json:"id"`
+ RepoID int64 `db:"repo_id" json:"repoId"`
+ Tag string `db:"tag" json:"tag" description:"image tag, default:latest"`
+ Labels JSONColumn[Labels] `db:"labels" json:"labels"`
+ Size int64 `db:"size" json:"size" description:"actual file size(in bytes)"`
+ VirtualSize int64 `db:"virtual_size" json:"virtualSize" description:"virtual size of image file"`
+ Digest string `db:"digest" json:"digest" description:"image digest"`
+ Format string `db:"format" json:"format" description:"image format"`
+ OS JSONColumn[types.OSInfo] `db:"os" json:"os"`
+ Snapshot string `db:"snapshot" json:"snapshot" description:"RBD Snapshot for this image, eg: eru/ubuntu-18.04@v1"`
+ Description string `db:"description" json:"description" description:"image description"`
+ CreatedAt time.Time `db:"created_at" json:"createdAt" description:"image create time"`
+ UpdatedAt time.Time `db:"updated_at" json:"updatedAt" description:"image update time"`
+ Repo *Repository `db:"-" json:"repo"`
+}
+
+func (*Image) TableName() string {
+ return "image"
+}
+
+func (img *Image) ColumnNames() string {
+ names := GetColumnNames(img)
+ return strings.Join(names, ", ")
+}
+
+func (repo *Repository) Fullname() string {
+ return fmt.Sprintf("%s/%s", repo.Username, repo.Name)
+}
+
+func (img *Image) Fullname() string {
+ return fmt.Sprintf("%s/%s:%s", img.Repo.Username, img.Repo.Name, img.Tag)
+}
+
+func (img *Image) NormalizeName() string {
+ if img.Repo.Username == "_" {
+ return fmt.Sprintf("%s:%s", img.Repo.Name, img.Tag)
+ }
+ return fmt.Sprintf("%s/%s:%s", img.Repo.Username, img.Repo.Name, img.Tag)
+}
+
+func (img *Image) SliceName() string {
+ return fmt.Sprintf("%s/_slice_%s:%s", img.Repo.Username, img.Repo.Name, img.Tag)
+}
+
+func (img *Image) GetRepo() (*Repository, error) {
+ var err error
+ if img.Repo == nil {
+ img.Repo, err = QueryRepoByID(context.TODO(), img.RepoID)
+ }
+ return img.Repo, err
+}
+
+func (repo *Repository) GetImages() ([]Image, error) {
+ var err error
+
+ tblName := ((*Image)(nil)).TableName()
+ columns := ((*Image)(nil)).ColumnNames()
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? ORDER BY updated_at DESC", columns, tblName)
+ err = db.Select(&repo.Images, sqlStr, repo.ID)
+ if err != nil {
+ return nil, err
+ }
+ for idx := 0; idx < len(repo.Images); idx++ {
+ repo.Images[idx].Repo = repo
+ }
+ return repo.Images, nil
+}
+
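+// GetImage returns the repository's image for the given tag; for the default tag it
+// bypasses the cache and returns the most recently created image.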
+func (repo *Repository) GetImage(ctx context.Context, tag string) (*Image, error) {
+ var (
+ img *Image
+ err error
+ )
+ if !utils.IsDefaultTag(tag) {
+ if img, err = getImageFromRedis(ctx, repo, tag); err != nil {
+ return nil, err
+ }
+ }
+ if img != nil {
+ return img, nil
+ }
+ img = &Image{}
+ tblName := ((*Image)(nil)).TableName()
+ columns := ((*Image)(nil)).ColumnNames()
+ if utils.IsDefaultTag(tag) {
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? ORDER BY created_at DESC LIMIT 1", columns, tblName)
+ err = db.Get(img, sqlStr, repo.ID)
+ } else {
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? AND tag = ?", columns, tblName)
+ err = db.Get(img, sqlStr, repo.ID, tag)
+ }
+
+ if err == sql.ErrNoRows {
+ return nil, nil //nolint:nilnil
+ }
+ if err != nil {
+ return nil, err
+ }
+ img.Repo = repo
+ if err = setImageToRedis(ctx, img); err != nil {
+ return nil, err
+ }
+ return img, nil
+}
+
+func (repo *Repository) Delete(tx *sqlx.Tx) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ defer func() {
+ if err == nil {
+ _ = deleteRepoInRedis(context.TODO(), repo)
+ }
+ }()
+ // delete images belongs to this repository
+ sqlStr := "DELETE FROM image WHERE repo_id = ?"
+
+ if _, err := tx.Exec(sqlStr, repo.ID); err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to delete images")
+ }
+ sqlStr = "DELETE FROM repository WHERE id = ?"
+ _, err = tx.Exec(sqlStr, repo.ID)
+ if err != nil {
+ tx.Rollback() //nolint:errcheck
+ return errors.Wrapf(err, "failed to delete repository")
+ }
+ return nil
+}
+
+func (repo *Repository) DeleteImage(tx *sqlx.Tx, tag string) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ defer func() {
+ if err == nil {
+ _ = deleteImageInRedis(context.TODO(), repo, tag)
+ }
+ }()
+ // delete tags
+ sqlStr := "DELETE FROM image WHERE repo_id = ? AND tag = ?"
+
+ if _, err := tx.Exec(sqlStr, repo.ID, tag); err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to delete image")
+ }
+ return nil
+}
+
+// Save persists only the repository record itself; images are saved separately via SaveImage
+func (repo *Repository) Save(tx *sqlx.Tx) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ defer func() {
+ if err == nil {
+ _ = deleteRepoInRedis(context.TODO(), repo)
+ }
+ }()
+ var sqlRes sql.Result
+ if repo.ID > 0 {
+ sqlStr := "UPDATE repository SET private = ? WHERE username = ? and name = ?"
+ _, err = tx.Exec(sqlStr, repo.Private, repo.Username, repo.Name)
+ if err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to update repository: %v", repo)
+ }
+ } else {
+ sqlStr := "INSERT INTO repository(username, name, private) VALUES(?, ?, ?)"
+ sqlRes, err = tx.Exec(sqlStr, repo.Username, repo.Name, repo.Private)
+ if err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to insert repository: %v", repo)
+ }
+ // fetch image id
+ repo.ID, err = sqlRes.LastInsertId()
+ if err != nil { //nolint
+ // TODO query the new record
+ }
+ }
+ return
+}
+
+func (repo *Repository) SaveImage(tx *sqlx.Tx, img *Image) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ defer func() {
+ if err == nil {
+ _ = deleteImageInRedis(context.TODO(), repo, img.Tag)
+ }
+ }()
+ labels, err := img.Labels.Value()
+ if err != nil {
+ _ = tx.Rollback()
+ return err
+ }
+
+ var sqlRes sql.Result
+ if img.ID > 0 { //nolint
+ sqlStr := "UPDATE image SET digest = ?, size=?, snapshot=? WHERE id = ?"
+ _, err = tx.Exec(sqlStr, img.Digest, img.Size, img.Snapshot, img.ID)
+ if err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to update image: %v", img)
+ }
+ } else {
+ osVal, err := img.OS.Value()
+ if err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to insert image: %v", img)
+ }
+ sqlStr := "INSERT INTO image(repo_id, tag, labels, size, format, os, digest, snapshot, description) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)"
+ img.RepoID = repo.ID
+ sqlRes, err = tx.Exec(sqlStr, img.RepoID, img.Tag, labels, img.Size, img.Format, osVal, img.Digest, img.Snapshot, img.Description)
+ if err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to insert image: %v", img)
+ }
+ img.ID, err = sqlRes.LastInsertId()
+ if err != nil { //nolint
+ // TODO query the new record
+ }
+ }
+ return nil
+}
+
+func QueryRepoList(user string, pNum, pSize int) (ans []Repository, err error) {
+ tblName := ((*Repository)(nil)).TableName()
+ columns := ((*Repository)(nil)).ColumnNames()
+ offset := (pNum - 1) * pSize
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE username = ? ORDER BY updated_at DESC LIMIT ?, ?", columns, tblName)
+ err = db.Select(&ans, sqlStr, user, offset, pSize)
+ if err != nil {
+ return
+ }
+ return
+}
+
+func QueryPublicRepoList(user string, pNum, pSize int) (ans []Repository, err error) {
+ tblName := ((*Repository)(nil)).TableName()
+ columns := ((*Repository)(nil)).ColumnNames()
+ offset := (pNum - 1) * pSize
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND private = ? ORDER BY updated_at DESC LIMIT ?, ?", columns, tblName)
+ err = db.Select(&ans, sqlStr, user, false, offset, pSize)
+ if err != nil {
+ return
+ }
+ return
+}
+
+/*
+QueryRepo gets repository info
+@Param username string true "user name"
+@Param name string true "repository name"
+*/
+func QueryRepo(ctx context.Context, username, name string) (*Repository, error) {
+ tblName := ((*Repository)(nil)).TableName()
+ columns := ((*Repository)(nil)).ColumnNames()
+ repo, err := getRepoFromRedis(ctx, username, name)
+ if err != nil {
+ return nil, err
+ }
+ if repo != nil {
+ return repo, nil
+ }
+ repo = &Repository{}
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", columns, tblName)
+ err = db.Get(repo, sqlStr, username, name)
+ if err == sql.ErrNoRows {
+ return nil, nil //nolint:nilnil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if err = setRepoToRedis(ctx, repo); err != nil {
+ return nil, err
+ }
+ return repo, nil
+}
+
+func QueryRepoByID(ctx context.Context, id int64) (*Repository, error) {
+ tblName := ((*Repository)(nil)).TableName()
+ columns := ((*Repository)(nil)).ColumnNames()
+ repo := &Repository{}
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE id = ?", columns, tblName)
+
+ err := db.Get(repo, sqlStr, id)
+
+ if err == sql.ErrNoRows {
+ return nil, nil //nolint:nilnil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if err = setRepoToRedis(ctx, repo); err != nil {
+ return nil, err
+ }
+ return repo, nil
+}
+
+func QueryImagesByRepoIDs(repoIDs []int64, keyword string, pNum, pSize int) (ans []Image, count int, err error) {
+ if len(repoIDs) == 0 {
+ return nil, 0, nil
+ }
+ offset := (pNum - 1) * pSize
+ repoSIDs := lo.Map(repoIDs, func(id int64, _ int) string {
+ return strconv.FormatInt(id, 10)
+ })
+ idsStr := strings.Join(repoSIDs, ",")
+ // an IN clause cannot be bound through a single "?" placeholder, so the id list
+ // (built from int64 values, hence safe) is interpolated into the query directly
+ sqlStr := fmt.Sprintf("SELECT * FROM image WHERE repo_id IN (%s) AND name like CONCAT('%%', CONCAT(?, '%%')) ORDER BY updated_at DESC LIMIT ?, ?", idsStr)
+ err = db.Select(&ans, sqlStr, keyword, offset, pSize)
+ if err != nil {
+ return
+ }
+ sqlStr = fmt.Sprintf("SELECT count(*) FROM image WHERE repo_id IN (%s) AND name like CONCAT('%%', CONCAT(?, '%%'))", idsStr)
+ row := db.QueryRow(sqlStr, keyword)
+ if err = row.Scan(&count); err != nil {
+ return
+ }
+ return
+}
+
+type combinedResult struct {
+ Image
+ Username string `db:"username" json:"username"`
+ Name string `db:"name" json:"name"`
+ Private bool `db:"private" json:"private"`
+}
+
+func QueryImagesByUsername(req types.ImagesByUsernameRequest) (ans []Image, count int, err error) {
+ var rows *sqlx.Rows
+ var sRow *sql.Row
+ offset := (req.PageNum - 1) * req.PageSize
+ sqlStr := `SELECT i.id, i.repo_id, r.username, r.name, r.private, i.tag, i.size, i.digest, i.format, i.os,
+ i.snapshot, i.description, i.created_at, i.updated_at, i.labels, i.region_code
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.name like CONCAT('%', CONCAT(?, '%'))
+ ORDER BY i.updated_at DESC LIMIT ?, ?`
+ if strutil.IsBlank(req.RegionCode) {
+ rows, err = db.Queryx(sqlStr, req.Username, req.Keyword, offset, req.PageSize)
+ } else {
+ sqlStr = `SELECT i.id, i.repo_id, r.username, r.name, r.private, i.tag, i.size, i.digest, i.format, i.os,
+ i.snapshot, i.description, i.created_at, i.updated_at, i.labels, i.region_code
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.name like CONCAT('%', CONCAT(?, '%')) AND
+ r.region_code=? AND
+ i.region_code=?
+ ORDER BY i.updated_at DESC LIMIT ?, ?`
+ rows, err = db.Queryx(sqlStr, req.Username, req.Keyword, req.RegionCode, req.RegionCode, offset, req.PageSize)
+ }
+ if err != nil {
+ return nil, 0, err
+ }
+ defer rows.Close() //nolint:errcheck
+ for rows.Next() {
+ var res combinedResult
+ if err = rows.StructScan(&res); err != nil {
+ return nil, 0, err
+ }
+ res.Image.Repo = &Repository{
+ Username: res.Username,
+ Name: res.Name,
+ Private: res.Private,
+ }
+ ans = append(ans, res.Image)
+ }
+ sqlStr = `SELECT count(*)
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.name like CONCAT('%', CONCAT(?, '%'))
+ `
+ if strutil.IsBlank(req.RegionCode) {
+ sRow = db.QueryRow(sqlStr, req.Username, req.Keyword)
+ } else {
+ sqlStr = `SELECT count(*)
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.name like CONCAT('%', CONCAT(?, '%')) AND
+ r.region_code=? AND
+ i.region_code=?
+ `
+ sRow = db.QueryRow(sqlStr, req.Username, req.Keyword, req.RegionCode, req.RegionCode)
+ }
+ if err = sRow.Scan(&count); err != nil {
+ return nil, 0, err
+ }
+ return ans, count, nil
+
+}
+
+func QueryPublicImagesByUsername(req types.ImagesByUsernameRequest) (ans []Image, count int, err error) {
+ var rows *sqlx.Rows
+ var sRow *sql.Row
+ offset := (req.PageNum - 1) * req.PageSize
+ sqlStr := `SELECT i.id, i.repo_id, r.username, r.name, i.tag, i.size, i.digest, i.format, i.os,
+ i.snapshot, i.description, i.created_at, i.updated_at, i.labels, i.region_code
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.private=0 AND
+ r.name like CONCAT('%', CONCAT(?, '%'))
+ ORDER BY i.updated_at DESC LIMIT ?, ?`
+ if strutil.IsBlank(req.RegionCode) {
+ rows, err = db.Queryx(sqlStr, req.Username, req.Keyword, offset, req.PageSize)
+ } else {
+ sqlStr = `SELECT i.id, i.repo_id, r.username, r.name, i.tag, i.size, i.digest, i.format, i.os,
+ i.snapshot, i.description, i.created_at, i.updated_at, i.labels,i.region_code
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.private=0 AND
+ r.name like CONCAT('%', CONCAT(?, '%')) AND
+ r.region_code=? AND
+ i.region_code=?
+ ORDER BY i.updated_at DESC LIMIT ?, ?`
+ rows, err = db.Queryx(sqlStr, req.Username, req.Keyword, req.RegionCode, req.RegionCode, offset, req.PageSize)
+ }
+ if err != nil {
+ return nil, 0, err
+ }
+ defer rows.Close() //nolint:errcheck
+ for rows.Next() {
+ var res combinedResult
+ if err = rows.StructScan(&res); err != nil {
+ return nil, 0, err
+ }
+ res.Image.Repo = &Repository{
+ Username: res.Username,
+ Name: res.Name,
+ Private: res.Private,
+ }
+ ans = append(ans, res.Image)
+ }
+ sqlStr = `SELECT count(*)
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.private=0 AND
+ r.name like CONCAT('%', CONCAT(?, '%'))
+ `
+ if strutil.IsBlank(req.RegionCode) {
+ sRow = db.QueryRow(sqlStr, req.Username, req.Keyword)
+ } else {
+ sqlStr = `SELECT count(*)
+ FROM image i, repository r
+ WHERE r.id=i.repo_id AND
+ r.username=? AND
+ r.private=0 AND
+ r.name like CONCAT('%', CONCAT(?, '%')) AND
+ r.region_code=? AND
+ i.region_code=?
+ `
+ sRow = db.QueryRow(sqlStr, req.Username, req.Keyword, req.RegionCode, req.RegionCode)
+ }
+ if err = sRow.Scan(&count); err != nil {
+ return nil, 0, err
+ }
+ return ans, count, nil
+
+}
+
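+// The helpers below implement a small read-through Redis cache (10-minute TTL) keyed by
+// repository (username/name) and image (username/name/tag); write paths delete the keys.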
+func getRepoFromRedis(ctx context.Context, username, name string) (repo *Repository, err error) {
+ rKey := fmt.Sprintf(redistRepoKey, username, name)
+ repo = &Repository{}
+ err = utils.GetObjFromRedis(ctx, rKey, repo)
+ if err == redis.Nil {
+ return nil, nil //nolint
+ }
+ return
+}
+
+func setRepoToRedis(ctx context.Context, repo *Repository) error {
+ rKey := fmt.Sprintf(redistRepoKey, repo.Username, repo.Name)
+ return utils.SetObjToRedis(ctx, rKey, repo, 10*time.Minute)
+}
+
+func deleteRepoInRedis(ctx context.Context, repo *Repository) error {
+ rKey := fmt.Sprintf(redistRepoKey, repo.Username, repo.Name)
+ return utils.DeleteObjectsInRedis(ctx, rKey)
+}
+
+func getImageFromRedis(ctx context.Context, repo *Repository, tag string) (img *Image, err error) {
+ rKey := fmt.Sprintf(redisImageKey, repo.Username, repo.Name, tag)
+ img = &Image{}
+ err = utils.GetObjFromRedis(ctx, rKey, img)
+ if err == redis.Nil {
+ return nil, nil //nolint
+ }
+ return
+}
+
+func setImageToRedis(ctx context.Context, img *Image) error {
+ rKey := fmt.Sprintf(redisImageKey, img.Repo.Username, img.Repo.Name, img.Tag)
+ return utils.SetObjToRedis(ctx, rKey, img, 10*time.Minute)
+}
+
+func deleteImageInRedis(ctx context.Context, repo *Repository, tag string) error {
+ rKey := fmt.Sprintf(redisImageKey, repo.Username, repo.Name, tag)
+ return utils.DeleteObjectsInRedis(ctx, rKey)
+}
+
+func GetPublicImages(_ context.Context) ([]Image, error) {
+ images := make([]Image, 0)
+ repos := make([]*Repository, 0)
+ tblName := ((*Repository)(nil)).TableName()
+ columns := ((*Repository)(nil)).ColumnNames()
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE private = ? ORDER BY updated_at DESC", columns, tblName)
+ err := db.Select(&repos, sqlStr, false)
+ if err != nil {
+ return nil, err
+ }
+ for _, repo := range repos {
+ repoImages, err := repo.GetImages()
+ if err != nil {
+ return nil, err
+ }
+ images = append(images, repoImages...)
+ }
+ return images, nil
+}
+
+func GetImageByFullname(ctx context.Context, fullname string) (*Image, error) {
+ username, name, tag, err := pkgutils.ParseImageName(fullname)
+ if err != nil {
+ return nil, err
+ }
+ repo, err := QueryRepo(ctx, username, name)
+ if err != nil || repo == nil {
+ return nil, err
+ }
+ image, err := repo.GetImage(ctx, tag)
+ if err != nil {
+ return nil, err
+ }
+
+ return image, nil
+}
+
+func GetImageByID(_ context.Context, id int64) (*Image, error) {
+ imgTblName := ((*Image)(nil)).TableName()
+ imgColumnsName := ((*Image)(nil)).ColumnNames()
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s where id = ?", imgColumnsName, imgTblName)
+ image := Image{}
+ err := db.Get(&image, sqlStr, id)
+ if err != nil {
+ return nil, err
+ }
+ repoTblName := ((*Repository)(nil)).TableName()
+ repoColumnsName := ((*Repository)(nil)).ColumnNames()
+ sqlStr = fmt.Sprintf("SELECT %s FROM %s WHERE id = ?", repoColumnsName, repoTblName)
+ repo := Repository{}
+ err = db.Get(&repo, sqlStr, image.RepoID)
+ if err != nil {
+ return nil, err
+ }
+ image.Repo = &repo
+ return &image, nil
+}
diff --git a/internal/models/image_test.go b/internal/models/image_test.go
new file mode 100644
index 0000000..459714f
--- /dev/null
+++ b/internal/models/image_test.go
@@ -0,0 +1,233 @@
+package models
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/types"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestQueryRepo(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+
+ tableName := ((*Repository)(nil)).TableName()
+ columns := ((*Repository)(nil)).ColumnNames()
+ {
+ // empty result
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? ORDER BY updated_at DESC LIMIT ?, ?", columns, tableName)).
+ WithArgs("user2", 0, 10).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "username", "name"}))
+
+ repos, err := QueryRepoList("user2", 1, 10)
+ assert.Nil(t, err)
+ assert.Len(t, repos, 0)
+ }
+ {
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name"}).
+ AddRow(1, "user1", "name1").
+ AddRow(1, "user1", "name2")
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? ORDER BY updated_at DESC LIMIT ?, ?", columns, tableName)).
+ WithArgs("user1", 0, 10).
+ WillReturnRows(wantRows)
+ repos, err := QueryRepoList("user1", 1, 10)
+ assert.Nil(t, err)
+ assert.Len(t, repos, 2)
+ }
+ {
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name"}).
+ AddRow(1, "user1", "name2")
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", columns, tableName)).
+ WithArgs("user1", "name2").
+ WillReturnRows(wantRows)
+ repo, err := QueryRepo(context.Background(), "user1", "name2")
+ assert.Nil(t, err)
+ assert.Equal(t, "name2", repo.Name)
+ }
+}
+
+func TestQueryRepoCache(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+
+ tableName := ((*Repository)(nil)).TableName()
+ columns := ((*Repository)(nil)).ColumnNames()
+
+ {
+ wantRows := sqlmock.NewRows([]string{"id", "username", "name"}).
+ AddRow(1, "user1", "name2")
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ? AND name = ?", columns, tableName)).
+ WithArgs("user1", "name2").
+ WillReturnRows(wantRows)
+ repo, err := QueryRepo(context.Background(), "user1", "name2")
+ assert.Nil(t, err)
+ assert.Equal(t, "name2", repo.Name)
+ }
+ {
+ repo, err := QueryRepo(context.Background(), "user1", "name2")
+ assert.Nil(t, err)
+ assert.Equal(t, "name2", repo.Name)
+ }
+}
+func TestGetImages(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+
+ tableName := ((*Image)(nil)).TableName()
+ columns := ((*Image)(nil)).ColumnNames()
+
+ repo := Repository{
+ ID: 1,
+ Username: "user1",
+ Name: "name1",
+ }
+
+ {
+ utils.MockRedis.FlushAll()
+ wantRows := sqlmock.NewRows([]string{"id", "repo_id", "tag"}).
+ AddRow(1, 1, "tag1").
+ AddRow(2, 1, "tag2")
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? ORDER BY updated_at DESC", columns, tableName)).
+ WithArgs(1).
+ WillReturnRows(wantRows)
+ images, err := repo.GetImages()
+ assert.Nil(t, err)
+ assert.Len(t, images, 2)
+ }
+
+ {
+ utils.MockRedis.FlushAll()
+ wantRows := sqlmock.NewRows([]string{"id", "repo_id", "tag"}).
+ AddRow(2, 1, "tag2")
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? AND tag = ?", columns, tableName)).
+ WithArgs(1, "tag2").
+ WillReturnRows(wantRows)
+ img, err := repo.GetImage(context.Background(), "tag2")
+ assert.Nil(t, err)
+ assert.NotNil(t, img)
+ assert.Equal(t, "tag2", img.Tag)
+ }
+}
+
+func TestGetImageCache(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+
+ tableName := ((*Image)(nil)).TableName()
+ columns := ((*Image)(nil)).ColumnNames()
+
+ repo := Repository{
+ ID: 1,
+ Username: "user1",
+ Name: "name1",
+ }
+ {
+ wantRows := sqlmock.NewRows([]string{"id", "repo_id", "tag"}).
+ AddRow(2, 1, "tag2")
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE repo_id = ? AND tag = ?", columns, tableName)).
+ WithArgs(1, "tag2").
+ WillReturnRows(wantRows)
+ img, err := repo.GetImage(context.Background(), "tag2")
+ assert.Nil(t, err)
+ assert.NotNil(t, img)
+ assert.Equal(t, "tag2", img.Tag)
+ }
+ {
+ img, err := repo.GetImage(context.Background(), "tag2")
+ assert.Nil(t, err)
+ assert.NotNil(t, img)
+ assert.Equal(t, "tag2", img.Tag)
+ }
+}
+
+func TestSaveRepo(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+ tableName := "repository"
+
+ repo := &Repository{
+ Username: "user1",
+ Name: "name1",
+ }
+ Mock.ExpectBegin()
+ Mock.ExpectExec(fmt.Sprintf("INSERT INTO %s(username, name, private) VALUES(?, ?, ?)", tableName)).
+ WithArgs(repo.Username, repo.Name, repo.Private).
+ WillReturnResult(sqlmock.NewResult(1234, 1))
+ tx, err := db.Beginx()
+ assert.Nil(t, err)
+
+ err = repo.Save(tx)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1234), repo.ID)
+
+}
+
+func TestSaveImage(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+ tableName := ((*Image)(nil)).TableName()
+
+ repo := &Repository{
+ ID: 2323,
+ Username: "user1",
+ Name: "name1",
+ }
+ img := &Image{
+ Tag: "latest",
+ Labels: NewJSONColumn(&Labels{}),
+ Size: 12345,
+ Digest: "12345",
+ Format: "qcow2",
+ OS: NewJSONColumn(&types.OSInfo{
+ Type: "linux",
+ Distrib: "ubuntu",
+ }),
+ }
+ osVal, err := img.OS.Value()
+ assert.Nil(t, err)
+ Mock.ExpectBegin()
+ Mock.ExpectExec(fmt.Sprintf("INSERT INTO %s(repo_id, tag, labels, size, format, os, digest, snapshot, description) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)", tableName)).
+ WithArgs(repo.ID, img.Tag, sqlmock.AnyArg(), img.Size, img.Format, osVal, img.Digest, img.Snapshot, img.Description).
+ WillReturnResult(sqlmock.NewResult(1234, 1))
+ tx, err := db.Beginx()
+ assert.Nil(t, err)
+
+ err = repo.SaveImage(tx, img)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1234), img.ID)
+
+}
diff --git a/internal/models/init.go b/internal/models/init.go
new file mode 100644
index 0000000..f29f97b
--- /dev/null
+++ b/internal/models/init.go
@@ -0,0 +1,40 @@
+package models
+
+import (
+ "testing"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ _ "github.com/go-sql-driver/mysql" // ignore lint error
+ "github.com/jmoiron/sqlx"
+ "github.com/projecteru2/vmihub/config"
+)
+
+var (
+ db *sqlx.DB
+ Mock sqlmock.Sqlmock
+)
+
+type Labels map[string]string
+
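+// Init opens the MySQL connection pool described by cfg; when a *testing.T is supplied it
+// wires up an sqlmock-backed database instead so tests can set expectations via Mock.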
+func Init(cfg *config.MysqlConfig, t *testing.T) (err error) {
+ if t != nil {
+ sqlDB, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ if err != nil {
+ return err
+ }
+ db = sqlx.NewDb(sqlDB, "sqlmock")
+ Mock = mock
+ return nil
+ }
+ db, err = sqlx.Open("mysql", cfg.DSN)
+ if err != nil {
+ return
+ }
+ db.SetMaxOpenConns(cfg.MaxOpenConns)
+ db.SetMaxIdleConns(cfg.MaxIdleConns)
+ return
+}
+
+func Instance() *sqlx.DB {
+ return db
+}
diff --git a/internal/models/migration/00000000000000_initial.down.sql b/internal/models/migration/00000000000000_initial.down.sql
new file mode 100644
index 0000000..3ef2ba6
--- /dev/null
+++ b/internal/models/migration/00000000000000_initial.down.sql
@@ -0,0 +1,7 @@
+DROP TABLE IF EXISTS repository;
+
+DROP TABLE IF EXISTS `image`;
+
+DROP TABLE IF EXISTS `user`;
+
+DROP TABLE IF EXISTS `private_token`;
\ No newline at end of file
diff --git a/internal/models/migration/00000000000000_initial.up.sql b/internal/models/migration/00000000000000_initial.up.sql
new file mode 100644
index 0000000..6417abd
--- /dev/null
+++ b/internal/models/migration/00000000000000_initial.up.sql
@@ -0,0 +1,57 @@
+CREATE TABLE IF NOT EXISTS repository (
+ id MEDIUMINT NOT NULL AUTO_INCREMENT COMMENT 'repo id',
+ username CHAR(30) NOT NULL COMMENT 'create username',
+ name CHAR(30) NOT NULL COMMENT 'name',
+ private BOOLEAN NOT NULL DEFAULT FALSE COMMENT 'is private',
+ os_type VARCHAR(50) NOT NULL DEFAULT '' COMMENT 'os type',
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'repo create time',
+ updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'repo update time',
+ PRIMARY KEY (id),
+ UNIQUE (username, name)
+) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
+
+CREATE TABLE IF NOT EXISTS `image` (
+ id MEDIUMINT NOT NULL AUTO_INCREMENT COMMENT 'tag id',
+ repo_id MEDIUMINT NOT NULL COMMENT 'repo id',
+ tag VARCHAR(40) NOT NULL COMMENT 'image tag',
+ labels JSON NOT NULL COMMENT 'image labels',
+ # state ENUM( 'creating', 'ready', 'unknown') NOT NULL DEFAULT 'creating' COMMENT 'image state',
+ digest VARCHAR(80) NOT NULL COMMENT 'image digest',
+ size BIGINT(20) UNSIGNED NOT NULL DEFAULT '0' COMMENT 'image size, byte',
+ virtual_size BIGINT(20) UNSIGNED NOT NULL DEFAULT '0' COMMENT 'image virtual size, byte',
+    format VARCHAR(10) NOT NULL COMMENT 'image format',
+ os JSON NOT NULL COMMENT 'os information',
+ snapshot VARCHAR(80) NOT NULL COMMENT 'RBD snapshot for image',
+ description VARCHAR(100) NOT NULL DEFAULT '' COMMENT 'image description',
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'image create time',
+ updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'image update time',
+ PRIMARY KEY (id),
+ UNIQUE (repo_id, tag)
+) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
+
+CREATE TABLE IF NOT EXISTS `user` (
+ id INT(10) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'user id',
+ username VARCHAR(50) NOT NULL COMMENT 'user name',
+ password VARCHAR(255) NOT NULL COMMENT 'user pwd',
+ email VARCHAR(100) NOT NULL COMMENT 'user email',
+ nickname VARCHAR(50) NOT NULL COMMENT 'nick name',
+ admin BOOLEAN NOT NULL DEFAULT FALSE COMMENT 'is administrator',
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
+ updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
+ PRIMARY KEY (id),
+    UNIQUE KEY username (username)
+) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
+
+CREATE TABLE IF NOT EXISTS private_token (
+    id INT(10) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'token id',
+ name VARCHAR(50) NOT NULL COMMENT 'token name',
+ user_id MEDIUMINT NOT NULL COMMENT 'user id',
+ token VARCHAR(100) NOT NULL COMMENT 'token',
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
+ last_used TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'last use time',
+ expired_at DATETIME NOT NULL DEFAULT '9999-12-31 23:59:59' COMMENT 'expired time',
+ PRIMARY KEY (id),
+ UNIQUE KEY token (token),
+ UNIQUE (user_id, name)
+) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
diff --git a/internal/models/migration/README.md b/internal/models/migration/README.md
new file mode 100644
index 0000000..9ca2240
--- /dev/null
+++ b/internal/models/migration/README.md
@@ -0,0 +1,40 @@
+## Install migrate
+Download a release directly from [here](https://github.com/golang-migrate/migrate/releases), or run `make db-migrate-setup`.
+
+## Create the up and down files
+
+```
+make db-migrate-create table=xxx
+```
+Or run the migrate command directly:
+```
+migrate create -ext sql -dir internal/models/migration op_log_table
+```
+
+## Write the up and down files by hand; the up file applies the change, the down file rolls it back
+```
+For reference:
+20231227072912_op_log_table.up.sql
+20231227072912_op_log_table.down.sql
+```
+
+## Migrate
+
+```
+make db-migrate-up uri='mysql://vmihub:******@tcp(10.200.0.188:3306)/vmihub_test?parseTime=true'
+```
+
+Or run the migrate command directly:
+```
+migrate -database 'mysql://vmihub:******@tcp(10.200.0.188:3306)/vmihub_test?parseTime=true' -path ./internal/models/migration up 1
+```
+
+## Roll back
+```
+make db-migrate-down uri='mysql://vmihub:******@tcp(10.200.0.188:3306)/vmihub_test?parseTime=true' N=1
+```
+
+Or run the migrate command directly:
+```
+migrate -database 'mysql://vmihub:******@tcp(10.200.0.188:3306)/vmihub_test?parseTime=true' -path ./internal/models/migration down
+```
\ No newline at end of file
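The Makefile targets above wrap the migrate CLI. The same migrations can also be driven from Go with the golang-migrate library; the sketch below is illustrative only (the DSN is a placeholder, not a value from this repo):

```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/mysql" // mysql:// database driver
	_ "github.com/golang-migrate/migrate/v4/source/file"    // file:// source driver
)

func main() {
	// Placeholder DSN; in practice it would come from configuration.
	m, err := migrate.New(
		"file://internal/models/migration",
		"mysql://vmihub:secret@tcp(127.0.0.1:3306)/vmihub_test?parseTime=true",
	)
	if err != nil {
		log.Fatalf("open migrations: %v", err)
	}
	// Apply all pending up migrations; ErrNoChange just means the schema is already current.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatalf("apply migrations: %v", err)
	}
}
```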
diff --git a/internal/models/request.go b/internal/models/request.go
new file mode 100644
index 0000000..949d7bb
--- /dev/null
+++ b/internal/models/request.go
@@ -0,0 +1,11 @@
+package models
+
+import "github.com/dgrijalva/jwt-go"
+
+// CustomClaims is the claims payload carried in signed JWT tokens
+type CustomClaims struct {
+ ID int64
+ UserName string
+	// AuthorityId uint // role authorization ID
+ jwt.StandardClaims
+}
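Because CustomClaims embeds jwt.StandardClaims, the usual dgrijalva/jwt-go signing and parsing flow applies directly. A minimal sketch follows; the secret, expiry, and the helper names issueToken/parseToken are illustrative assumptions, not code from this change:

```go
package models

import (
	"time"

	"github.com/dgrijalva/jwt-go"
)

// issueToken signs a token carrying CustomClaims (hypothetical helper).
func issueToken(secret []byte, userID int64, username string) (string, error) {
	claims := CustomClaims{
		ID:       userID,
		UserName: username,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: time.Now().Add(24 * time.Hour).Unix(),
			IssuedAt:  time.Now().Unix(),
		},
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}

// parseToken validates a token string and returns the embedded claims (hypothetical helper).
func parseToken(secret []byte, tokenStr string) (*CustomClaims, error) {
	token, err := jwt.ParseWithClaims(tokenStr, &CustomClaims{}, func(*jwt.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil {
		return nil, err
	}
	return token.Claims.(*CustomClaims), nil
}
```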
diff --git a/internal/models/templates/ceph.client.eru.keyring b/internal/models/templates/ceph.client.eru.keyring
new file mode 100644
index 0000000..5bd99e2
--- /dev/null
+++ b/internal/models/templates/ceph.client.eru.keyring
@@ -0,0 +1,2 @@
+[client.eru]
+ key = {{ .key }}
diff --git a/internal/models/templates/ceph.conf b/internal/models/templates/ceph.conf
new file mode 100644
index 0000000..f40e2f8
--- /dev/null
+++ b/internal/models/templates/ceph.conf
@@ -0,0 +1,4 @@
+# minimal ceph.conf for f72e4cba-2aef-11ee-91cc-ba899cefe809
+[global]
+ fsid = {{ .fsid }}
+ mon_host = {{range .mon_host }} [v2:{{ . }}:3300/0,v1:{{ . }}:6789/0] {{end}}
diff --git a/internal/models/templates/rbdmap b/internal/models/templates/rbdmap
new file mode 100644
index 0000000..90f235b
--- /dev/null
+++ b/internal/models/templates/rbdmap
@@ -0,0 +1,2 @@
+# RbdDevice Parameters
+# poolname/imagename id=client,keyring=/etc/ceph/ceph.client.keyring
diff --git a/internal/models/user.go b/internal/models/user.go
new file mode 100644
index 0000000..35863ce
--- /dev/null
+++ b/internal/models/user.go
@@ -0,0 +1,301 @@
+package models
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/jmoiron/sqlx"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+ "github.com/redis/go-redis/v9"
+ "golang.org/x/crypto/bcrypt"
+)
+
+type User struct {
+ ID int64 `db:"id" json:"id"`
+ Username string `db:"username" json:"username" description:"Login user name"`
+ Password string `db:"password" json:"password" description:"user login password"`
+ Nickname string `db:"nickname" json:"nickname" description:"user's nickname"`
+ Email string `db:"email" json:"email" description:"user's email"`
+	Admin     bool      `db:"admin" json:"admin" description:"is an admin"`
+ CreatedAt time.Time `db:"created_at" json:"createdAt" description:"user create time"`
+ UpdatedAt time.Time `db:"updated_at" json:"updatedAt" description:"user update time"`
+}
+
+func (*User) TableName() string {
+ return "user"
+}
+
+func (user *User) ColumnNames() string {
+ names := GetColumnNames(user)
+ return strings.Join(names, ", ")
+}
+
+type PrivateToken struct {
+ ID int64 `db:"id" json:"id"`
+ Name string `db:"name" json:"name"`
+ UserID int64 `db:"user_id" json:"userId"`
+ Token string `db:"token" json:"token"`
+ ExpiredAt time.Time `db:"expired_at" json:"expiredAt"`
+ CreatedAt time.Time `db:"created_at" json:"createdAt" description:"user create time"`
+ LastUsed time.Time `db:"last_used" json:"lastUsed"`
+}
+
+func (*PrivateToken) tableName() string {
+ return "private_token"
+}
+
+func (t *PrivateToken) columnNames() string {
+ names := GetColumnNames(t)
+ return strings.Join(names, ", ")
+}
+
+func (t *PrivateToken) GetUser() (*User, error) {
+ return GetUserByID(context.TODO(), t.UserID)
+}
+
+func (t *PrivateToken) Save(tx *sqlx.Tx) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ sqlStr := "INSERT INTO private_token(user_id, name, token, expired_at) VALUES(?, ?, ?, ?)"
+ sqlRes, err := tx.Exec(sqlStr, t.UserID, t.Name, t.Token, t.ExpiredAt)
+ if err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to insert private token: %v", t)
+ }
+	// fetch the auto-generated token id
+ t.ID, _ = sqlRes.LastInsertId()
+ return nil
+}
+
+func (t *PrivateToken) Delete(tx *sqlx.Tx) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ sqlStr := "DELETE FROM private_token WHERE id = ?"
+ if _, err = tx.Exec(sqlStr, t.ID); err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to delete private token: %v", t)
+ }
+ return
+}
+
+func (t *PrivateToken) UpdateLastUsed() error {
+ sqlStr := "UPDATE private_token SET last_used = ? WHERE id = ?"
+ _, err := db.Exec(sqlStr, time.Now(), t.ID)
+ return err
+}
+
+func QueryPrivateTokensByUser(ctx context.Context, userID int64) (tokens []*PrivateToken, err error) {
+ tblName := ((*PrivateToken)(nil)).tableName()
+ columns := ((*PrivateToken)(nil)).columnNames()
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE user_id = ?", columns, tblName)
+ err = db.SelectContext(ctx, &tokens, sqlStr, userID)
+ return
+}
+
+func (user *User) Update(tx *sqlx.Tx) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ defer func() {
+ if err == nil {
+ deleteUserInRedis(context.TODO(), user)
+ }
+ }()
+ sqlStr := "UPDATE user SET nickname = ?, email = ? WHERE id = ?"
+ if _, err = tx.Exec(sqlStr, user.Nickname, user.Email, user.ID); err != nil {
+ _ = tx.Rollback()
+ return errors.Wrapf(err, "failed to update user: %v", user)
+ }
+ return
+}
+
+func (user *User) UpdatePwd(password string) (err error) {
+ defer func() {
+ if err == nil {
+ deleteUserInRedis(context.TODO(), user)
+ }
+ }()
+ ePasswd, err := utils.EncryptPassword(password)
+ if err != nil {
+ return err
+ }
+	sqlStr := "UPDATE user SET password = ? WHERE id = ?"
+ if _, err = db.Exec(sqlStr, ePasswd, user.ID); err != nil {
+ return err
+ }
+ return nil
+}
+
+func CreateUser(tx *sqlx.Tx, user *User, password string) (err error) {
+ if tx == nil {
+ tx, _ = db.Beginx()
+ defer func() {
+ if err == nil {
+ _ = tx.Commit()
+ }
+ }()
+ }
+ if user.Password, err = utils.EncryptPassword(password); err != nil {
+ return err
+ }
+ sqlStr := "INSERT INTO user (username, password, email, nickname) VALUES (?, ?, ?, ?)"
+ sqlRes, err := tx.Exec(sqlStr, user.Username, user.Password, user.Email, user.Nickname)
+ if err != nil {
+ return err
+ }
+ user.ID, _ = sqlRes.LastInsertId()
+ return nil
+}
+
+func GetPrivateToken(token string) (*PrivateToken, error) {
+ privToken := &PrivateToken{}
+ tblName := ((*PrivateToken)(nil)).tableName()
+ columns := ((*PrivateToken)(nil)).columnNames()
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE token = ?", columns, tblName)
+ err := db.Get(privToken, sqlStr, token)
+ if err == sql.ErrNoRows {
+ return nil, nil //nolint
+ }
+ if err != nil {
+ return nil, err
+ }
+ return privToken, nil
+}
+
+func GetPrivateTokenByUserAndName(userID int64, name string) (*PrivateToken, error) {
+ privToken := &PrivateToken{}
+ tblName := ((*PrivateToken)(nil)).tableName()
+ columns := ((*PrivateToken)(nil)).columnNames()
+ sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE user_id = ? AND name = ?", columns, tblName)
+ err := db.Get(privToken, sqlStr, userID, name)
+ if err == sql.ErrNoRows {
+ return nil, nil //nolint
+ }
+ if err != nil {
+ return nil, err
+ }
+ return privToken, nil
+}
+
+func GetUser(ctx context.Context, idStr string) (*User, error) {
+	tblName := ((*User)(nil)).TableName()
+	columns := ((*User)(nil)).ColumnNames()
+	user, err := getUserFromRedis(ctx, idStr)
+	if err != nil {
+		return nil, err
+	}
+	if user != nil {
+		return user, nil
+	}
+	user = &User{}
+	sqlStr := fmt.Sprintf("SELECT %s FROM %s WHERE username = ?", columns, tblName)
+ err = db.Get(user, sqlStr, idStr)
+ if err == sql.ErrNoRows {
+ return nil, nil //nolint
+ }
+ if err != nil {
+ return nil, err
+ }
+ if err = setUserToRedis(ctx, user); err != nil {
+ return nil, err
+ }
+ return user, nil
+}
+
+func GetUserByID(ctx context.Context, id int64) (*User, error) {
+ tblName := ((*User)(nil)).TableName()
+ user, err := getUserFromRedisByID(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ if user != nil {
+ return user, nil
+ }
+
+ sqlStr := fmt.Sprintf("SELECT * FROM %s WHERE id = ?", tblName)
+ user = &User{}
+ err = db.Get(user, sqlStr, id)
+ if err == sql.ErrNoRows {
+ return nil, nil //nolint:nilnil
+ }
+ if err != nil {
+ return user, err
+ }
+ if err = setUserToRedisByID(ctx, user); err != nil {
+ return nil, err
+ }
+ return user, nil
+}
+
+func CheckAndGetUser(ctx context.Context, idStr, password string) (*User, error) {
+ user, err := GetUser(ctx, idStr)
+ if err != nil {
+ return user, err
+ }
+ if user == nil {
+ return nil, terrors.ErrInvalidUserPass
+ }
+ // compare password
+ if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)); err != nil {
+ return nil, terrors.ErrInvalidUserPass
+ }
+ return user, nil
+}
+
+func getUserFromRedis(ctx context.Context, username string) (u *User, err error) {
+ rKey := fmt.Sprintf(redisUserKey, username)
+ u = &User{}
+ err = utils.GetObjFromRedis(ctx, rKey, u)
+ if err == redis.Nil {
+ return nil, nil //nolint
+ }
+ return
+}
+
+func setUserToRedis(ctx context.Context, u *User) (err error) {
+ rKey := fmt.Sprintf(redisUserKey, u.Username)
+ return utils.SetObjToRedis(ctx, rKey, u, 10*time.Minute)
+}
+
+func getUserFromRedisByID(ctx context.Context, id int64) (u *User, err error) {
+ rKey := fmt.Sprintf(redisUserIDKey, id)
+ u = &User{}
+ err = utils.GetObjFromRedis(ctx, rKey, u)
+ if err == redis.Nil {
+ return nil, nil //nolint
+ }
+ return
+}
+
+func setUserToRedisByID(ctx context.Context, u *User) (err error) {
+ rKey := fmt.Sprintf(redisUserIDKey, u.ID)
+ return utils.SetObjToRedis(ctx, rKey, u, 10*time.Minute)
+}
+
+func deleteUserInRedis(ctx context.Context, u *User) {
+ rKey1 := fmt.Sprintf(redisUserKey, u.Username)
+ rKey2 := fmt.Sprintf(redisUserIDKey, u.ID)
+ _ = utils.DeleteObjectsInRedis(ctx, rKey1, rKey2)
+}
diff --git a/internal/models/user_test.go b/internal/models/user_test.go
new file mode 100644
index 0000000..395fab6
--- /dev/null
+++ b/internal/models/user_test.go
@@ -0,0 +1,68 @@
+package models
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ tableName = ((*User)(nil)).TableName()
+ columns = ((*User)(nil)).ColumnNames()
+)
+
+func TestGetUser(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+
+ username := "user1"
+ passwd := "passwd1"
+ ePasswd, err := utils.EncryptPassword(passwd)
+ assert.Nil(t, err)
+
+ wantRows := sqlmock.NewRows([]string{"id", "username", "password"}).
+ AddRow(1, username, ePasswd)
+
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ?", columns, tableName)).
+ WithArgs(username).
+ WillReturnRows(wantRows)
+ user, err := GetUser(context.Background(), username)
+ assert.Nil(t, err)
+ assert.Equal(t, username, user.Username)
+
+}
+
+func TestCheckAndGetUser(t *testing.T) {
+ utils.SetupRedis(nil, t)
+ err := Init(nil, t)
+ assert.Nil(t, err)
+
+ defer func() {
+ err = Mock.ExpectationsWereMet()
+ assert.Nil(t, err)
+ }()
+
+ username := "user1"
+ passwd := "passwd1"
+ ePasswd, err := utils.EncryptPassword(passwd)
+ assert.Nil(t, err)
+
+ wantRows := sqlmock.NewRows([]string{"id", "username", "password"}).
+ AddRow(1, username, ePasswd)
+ Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ?", columns, tableName)).
+ WithArgs(username).
+ WillReturnRows(wantRows)
+ user2, err := CheckAndGetUser(context.Background(), username, passwd)
+ assert.Nil(t, err)
+ assert.Equal(t, user2.Username, username)
+}
diff --git a/internal/models/utils.go b/internal/models/utils.go
new file mode 100644
index 0000000..5e61b13
--- /dev/null
+++ b/internal/models/utils.go
@@ -0,0 +1,50 @@
+package models
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "reflect"
+)
+
+type JSONColumn[T any] struct {
+ V *T
+}
+
+func (j *JSONColumn[T]) Scan(src any) error {
+ if src == nil {
+ j.V = nil
+ return nil
+ }
+ j.V = new(T)
+ return json.Unmarshal(src.([]byte), j.V)
+}
+
+func (j *JSONColumn[T]) Value() (driver.Value, error) {
+ raw, err := json.Marshal(j.V)
+ return raw, err
+}
+
+func (j *JSONColumn[T]) Get() *T {
+ return j.V
+}
+
+func NewJSONColumn[T any](v *T) JSONColumn[T] {
+ return JSONColumn[T]{
+ V: v,
+ }
+}
+
+func GetColumnNames[T any](obj *T) []string {
+ ty := reflect.TypeOf(obj).Elem()
+ ans := make([]string, 0, ty.NumField())
+ for i := 0; i < ty.NumField(); i++ {
+		// read the struct field and its db tag
+ field := ty.Field(i)
+ dbTag := field.Tag.Get("db")
+ if dbTag == "-" {
+ continue
+ }
+ ans = append(ans, dbTag)
+ }
+ return ans
+}
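JSONColumn implements sql.Scanner and driver.Valuer, which is what lets Image keep its labels and OS info in MySQL JSON columns. A small round-trip sketch (the Labels value is arbitrary); database/sql calls Value() when binding the parameter and Scan() when reading the row back:

```go
package models

import "fmt"

// jsonColumnRoundTrip shows both directions of a JSONColumn: Value() produces the
// JSON bytes that get stored, Scan() rebuilds the typed value from those bytes.
func jsonColumnRoundTrip() error {
	col := NewJSONColumn(&Labels{"arch": "amd64"})

	raw, err := col.Value() // -> []byte(`{"arch":"amd64"}`)
	if err != nil {
		return err
	}

	var decoded JSONColumn[Labels]
	if err := decoded.Scan(raw); err != nil { // what the driver does when reading the row
		return err
	}
	fmt.Println((*decoded.Get())["arch"]) // prints "amd64"
	return nil
}
```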
diff --git a/internal/models/utils_test.go b/internal/models/utils_test.go
new file mode 100644
index 0000000..b58ae05
--- /dev/null
+++ b/internal/models/utils_test.go
@@ -0,0 +1,29 @@
+package models
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestJsonColumn(t *testing.T) {
+ d := map[string]string{
+ "key": "value",
+ }
+ col := NewJSONColumn(&d)
+ bs, err := json.Marshal(col)
+ assert.Nil(t, err)
+ var col1 JSONColumn[map[string]string]
+ err = json.Unmarshal(bs, &col1)
+ assert.Nil(t, err)
+ d2 := *col1.Get()
+ assert.Len(t, d2, 1)
+ assert.Equal(t, "value", d2["key"])
+}
+
+func TestName(t *testing.T) {
+ img := &Image{}
+ assert.Equal(t, img.TableName(), "image")
+ assert.Equal(t, ((*Image)(nil)).TableName(), "image")
+}
diff --git a/internal/storage/factory/factory.go b/internal/storage/factory/factory.go
new file mode 100644
index 0000000..34e0c69
--- /dev/null
+++ b/internal/storage/factory/factory.go
@@ -0,0 +1,35 @@
+package factory
+
+import (
+ "github.com/cockroachdb/errors"
+ "github.com/projecteru2/vmihub/config"
+ "github.com/projecteru2/vmihub/internal/storage"
+ "github.com/projecteru2/vmihub/internal/storage/local"
+ "github.com/projecteru2/vmihub/internal/storage/mocks"
+ "github.com/projecteru2/vmihub/internal/storage/s3"
+)
+
+var (
+ stor storage.Storage
+)
+
+func Init(cfg *config.StorageConfig) (storage.Storage, error) {
+ var err error
+ if stor == nil {
+ switch cfg.Type {
+ case "local":
+ stor = local.New(cfg.Local.BaseDir)
+ case "s3":
+ stor, err = s3.New(cfg.S3.Endpoint, cfg.S3.AccessKey, cfg.S3.SecretKey, cfg.S3.Bucket, cfg.S3.BaseDir, nil)
+ case "mock":
+ stor = &mocks.Storage{}
+ default:
+ err = errors.Newf("unknown storage type %s", cfg.Type)
+ }
+ }
+ return stor, err
+}
+
+func Instance() storage.Storage {
+ return stor
+}
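Init wires up whichever backend the configuration names and memoizes it; Instance returns the same object afterwards. A rough usage sketch, reusing config.LoadTestConfig the same way internal/testutils does (the object key is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/projecteru2/vmihub/config"
	storFact "github.com/projecteru2/vmihub/internal/storage/factory"
)

func main() {
	cfg, err := config.LoadTestConfig()
	if err != nil {
		log.Fatal(err)
	}
	// Selects local, s3 or mock depending on cfg.Storage.Type.
	stor, err := storFact.Init(&cfg.Storage)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder object key; real callers derive it from the repository and tag.
	size, err := stor.GetSize(context.Background(), "images/user1/name1/latest")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("size:", size)
}
```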
diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
new file mode 100644
index 0000000..c6917c4
--- /dev/null
+++ b/internal/storage/local/local.go
@@ -0,0 +1,168 @@
+package local
+
+import (
+ "context"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/cockroachdb/errors"
+ "github.com/google/uuid"
+ stotypes "github.com/projecteru2/vmihub/internal/storage/types"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/pkg/terrors"
+ pkgutils "github.com/projecteru2/vmihub/pkg/utils"
+)
+
+type Store struct {
+ BaseDir string
+}
+
+func New(d string) *Store {
+ return &Store{
+ BaseDir: d,
+ }
+}
+
+func (s *Store) Get(_ context.Context, name string) (io.ReadCloser, error) {
+ filename := filepath.Join(s.BaseDir, name)
+
+	// return an error if the file does not exist
+ if _, err := os.Stat(filename); os.IsNotExist(err) {
+ return nil, errors.New("file not found")
+ }
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+func (s *Store) Delete(_ context.Context, name string, ignoreNotExists bool) error { //nolint:nolintlint //nolint
+ fullName := filepath.Join(s.BaseDir, name)
+ err := os.Remove(fullName)
+ if err != nil {
+ if ignoreNotExists && os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+func (s *Store) Put(_ context.Context, name string, digest string, in io.ReadSeeker) error { //nolint:nolintlint //nolint
+ fullName := filepath.Join(s.BaseDir, name)
+ if err := utils.EnsureDir(filepath.Dir(fullName)); err != nil {
+ return errors.Wrapf(err, "failed to create dir")
+ }
+
+ if err := utils.Invoke(func() error {
+ f, err := os.OpenFile(fullName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0766)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err = io.Copy(f, in); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ fileDigest, err := pkgutils.CalcDigestOfFile(fullName)
+ if err != nil {
+ return err
+ }
+ if fileDigest != digest {
+ return terrors.ErrInvalidDigest
+ }
+ return nil
+}
+
+// PutWithChunk is a no-op for the local store; chunked uploads are handled via
+// CreateChunkWrite/ChunkWrite/CompleteChunkWrite instead.
+func (s *Store) PutWithChunk(ctx context.Context, name string, digest string, size int, chunkSize int, in io.ReaderAt) error { //nolint
+	return nil
+}
+
+func (s *Store) SeekRead(_ context.Context, name string, start int64) (io.ReadCloser, error) {
+ filename := filepath.Join(s.BaseDir, name)
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ if _, err = f.Seek(start, 0); err != nil {
+ return nil, err
+ }
+
+ return f, nil
+}
+
+func (s *Store) CreateChunkWrite(_ context.Context, _ string) (string, error) {
+ return uuid.New().String(), nil
+}
+
+func (s *Store) ChunkWrite(_ context.Context, name string, _ string, info *stotypes.ChunkInfo) error {
+ in := info.In
+ offset := int64(info.Idx) * info.ChunkSize
+ filename := filepath.Join(s.BaseDir, name)
+ if err := utils.EnsureDir(filepath.Dir(filename)); err != nil {
+ return errors.Wrapf(err, "failed to create dir")
+ }
+
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0766)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err = f.Seek(offset, 0); err != nil {
+ return errors.Wrapf(err, "failed to seek file")
+ }
+ _, err = io.Copy(f, in)
+ return err
+}
+
+func (s *Store) CompleteChunkWrite(_ context.Context, _ string, _ string, _ []*stotypes.ChunkInfo) error {
+ return nil
+}
+
+func (s *Store) Copy(_ context.Context, src, dest string) error {
+	destName := filepath.Join(s.BaseDir, dest)
+	srcName := filepath.Join(s.BaseDir, src)
+	srcF, err := os.OpenFile(srcName, os.O_RDONLY, 0766)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open %s", srcName)
+	}
+	defer srcF.Close()
+	if err := utils.EnsureDir(filepath.Dir(destName)); err != nil {
+		return errors.Wrapf(err, "failed to create dir for %s", destName)
+	}
+	destF, err := os.OpenFile(destName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0766)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open %s", destName)
+	}
+	defer destF.Close()
+	_, err = io.Copy(destF, srcF)
+	return err
+}
+
+func (s *Store) Move(ctx context.Context, src, dest string) error {
+ if err := s.Copy(ctx, src, dest); err != nil {
+ return err
+ }
+ srcName := filepath.Join(s.BaseDir, src)
+ return os.Remove(srcName)
+}
+
+func (s *Store) GetSize(_ context.Context, name string) (int64, error) {
+ filename := filepath.Join(s.BaseDir, name)
+ info, err := os.Stat(filename)
+ if err != nil {
+ return 0, err
+ }
+ return info.Size(), nil
+}
+
+func (s *Store) GetDigest(_ context.Context, name string) (string, error) {
+ filename := filepath.Join(s.BaseDir, name)
+ return pkgutils.CalcDigestOfFile(filename)
+}
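The chunked-write trio (CreateChunkWrite, ChunkWrite, CompleteChunkWrite) is how resumable uploads land on disk: each chunk seeks to Idx*ChunkSize and writes in place. A minimal sketch using the local store; the base directory, object key, and sizes are arbitrary:

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/projecteru2/vmihub/internal/storage/local"
	stotypes "github.com/projecteru2/vmihub/internal/storage/types"
)

func main() {
	sto := local.New("/tmp/vmihub")
	ctx := context.Background()

	payload := bytes.Repeat([]byte{'x'}, 10*1024)
	chunkSize := int64(4 * 1024)

	tid, err := sto.CreateChunkWrite(ctx, "images/u/repo/latest")
	if err != nil {
		log.Fatal(err)
	}

	var chunks []*stotypes.ChunkInfo
	for idx, off := 0, int64(0); off < int64(len(payload)); idx, off = idx+1, off+chunkSize {
		end := off + chunkSize
		if end > int64(len(payload)) {
			end = int64(len(payload))
		}
		ci := &stotypes.ChunkInfo{
			Idx:       idx,
			Size:      end - off,
			ChunkSize: chunkSize,
			In:        bytes.NewReader(payload[off:end]),
		}
		// Each chunk is written at offset Idx*ChunkSize, so order and retries don't matter.
		if err := sto.ChunkWrite(ctx, "images/u/repo/latest", tid, ci); err != nil {
			log.Fatal(err)
		}
		chunks = append(chunks, ci)
	}
	if err := sto.CompleteChunkWrite(ctx, "images/u/repo/latest", tid, chunks); err != nil {
		log.Fatal(err)
	}
}
```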
diff --git a/internal/storage/local/local_test.go b/internal/storage/local/local_test.go
new file mode 100644
index 0000000..8a7c593
--- /dev/null
+++ b/internal/storage/local/local_test.go
@@ -0,0 +1,89 @@
+package local
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/google/uuid"
+ pkgutils "github.com/projecteru2/vmihub/pkg/utils"
+ "github.com/stretchr/testify/suite"
+)
+
+var (
+ name = "hahaha"
+ val = "kakakaka"
+)
+
+type testSuite struct {
+ suite.Suite
+ sto Store
+}
+
+func TestLocalTestSuite(t *testing.T) {
+ suite.Run(t, new(testSuite))
+}
+
+func (s *testSuite) SetupSuite() {
+ basename := uuid.NewString()[:5]
+ baseDir := filepath.Join("/tmp", basename)
+ err := os.MkdirAll(baseDir, 0755)
+ s.Nil(err)
+
+ s.sto = Store{
+ BaseDir: baseDir,
+ }
+}
+
+func (s *testSuite) TearDownSuite() {
+ err := os.RemoveAll(s.sto.BaseDir)
+ s.Nil(err)
+}
+
+func (s *testSuite) SetupTest() {
+ buf := bytes.NewReader([]byte(val))
+ digest, err := pkgutils.CalcDigestOfStr(val)
+ s.Nil(err)
+ err = s.sto.Put(context.Background(), name, digest, buf)
+ s.Nil(err)
+}
+
+func (s *testSuite) TearDownTest() {
+ err := s.sto.Delete(context.Background(), name, true)
+ s.Nil(err)
+}
+
+func (s *testSuite) TestGet() {
+ out, err := s.sto.Get(context.Background(), name)
+ s.Nil(err)
+ res, err := io.ReadAll(out)
+ s.Nil(err)
+ s.Equal(val, string(res))
+}
+
+func (s *testSuite) TestDelete() {
+ err := s.sto.Delete(context.Background(), name, false)
+ s.Nil(err)
+ err = s.sto.Delete(context.Background(), name, false)
+ s.NotNil(err)
+ err = s.sto.Delete(context.Background(), name, true)
+ s.Nil(err)
+}
+
+func (s *testSuite) TestPut() {
+ v1 := "gagagaga"
+
+ buf := bytes.NewReader([]byte(v1))
+ digest, err := pkgutils.CalcDigestOfStr(v1)
+ s.Nil(err)
+ err = s.sto.Put(context.Background(), name, digest, buf)
+ s.Nil(err)
+ out, err := s.sto.Get(context.Background(), name)
+ s.Nil(err)
+ res, err := io.ReadAll(out)
+ s.Nil(err)
+ s.Equal(v1, string(res))
+}
diff --git a/internal/storage/mocks/Storage.go b/internal/storage/mocks/Storage.go
new file mode 100644
index 0000000..715c6e6
--- /dev/null
+++ b/internal/storage/mocks/Storage.go
@@ -0,0 +1,283 @@
+// Code generated by mockery v2.42.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+ io "io"
+
+ mock "github.com/stretchr/testify/mock"
+
+ types "github.com/projecteru2/vmihub/internal/storage/types"
+)
+
+// Storage is an autogenerated mock type for the Storage type
+type Storage struct {
+ mock.Mock
+}
+
+// ChunkWrite provides a mock function with given fields: ctx, name, transactionID, info
+func (_m *Storage) ChunkWrite(ctx context.Context, name string, transactionID string, info *types.ChunkInfo) error {
+ ret := _m.Called(ctx, name, transactionID, info)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ChunkWrite")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *types.ChunkInfo) error); ok {
+ r0 = rf(ctx, name, transactionID, info)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// CompleteChunkWrite provides a mock function with given fields: ctx, name, transactionID, chunkList
+func (_m *Storage) CompleteChunkWrite(ctx context.Context, name string, transactionID string, chunkList []*types.ChunkInfo) error {
+ ret := _m.Called(ctx, name, transactionID, chunkList)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CompleteChunkWrite")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []*types.ChunkInfo) error); ok {
+ r0 = rf(ctx, name, transactionID, chunkList)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// CreateChunkWrite provides a mock function with given fields: ctx, name
+func (_m *Storage) CreateChunkWrite(ctx context.Context, name string) (string, error) {
+ ret := _m.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateChunkWrite")
+ }
+
+ var r0 string
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (string, error)); ok {
+ return rf(ctx, name)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) string); ok {
+ r0 = rf(ctx, name)
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, name)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Delete provides a mock function with given fields: ctx, name, ignoreNotExists
+func (_m *Storage) Delete(ctx context.Context, name string, ignoreNotExists bool) error {
+ ret := _m.Called(ctx, name, ignoreNotExists)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Delete")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok {
+ r0 = rf(ctx, name, ignoreNotExists)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Get provides a mock function with given fields: ctx, name
+func (_m *Storage) Get(ctx context.Context, name string) (io.ReadCloser, error) {
+ ret := _m.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Get")
+ }
+
+ var r0 io.ReadCloser
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (io.ReadCloser, error)); ok {
+ return rf(ctx, name)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) io.ReadCloser); ok {
+ r0 = rf(ctx, name)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(io.ReadCloser)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, name)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GetDigest provides a mock function with given fields: ctx, name
+func (_m *Storage) GetDigest(ctx context.Context, name string) (string, error) {
+ ret := _m.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetDigest")
+ }
+
+ var r0 string
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (string, error)); ok {
+ return rf(ctx, name)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) string); ok {
+ r0 = rf(ctx, name)
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, name)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GetSize provides a mock function with given fields: ctx, name
+func (_m *Storage) GetSize(ctx context.Context, name string) (int64, error) {
+ ret := _m.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetSize")
+ }
+
+ var r0 int64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok {
+ return rf(ctx, name)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok {
+ r0 = rf(ctx, name)
+ } else {
+ r0 = ret.Get(0).(int64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, name)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Move provides a mock function with given fields: ctx, src, dest
+func (_m *Storage) Move(ctx context.Context, src string, dest string) error {
+ ret := _m.Called(ctx, src, dest)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Move")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
+ r0 = rf(ctx, src, dest)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Put provides a mock function with given fields: ctx, name, digest, in
+func (_m *Storage) Put(ctx context.Context, name string, digest string, in io.ReadSeeker) error {
+ ret := _m.Called(ctx, name, digest, in)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Put")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, io.ReadSeeker) error); ok {
+ r0 = rf(ctx, name, digest, in)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// PutWithChunk provides a mock function with given fields: ctx, name, digest, size, chunkSize, in
+func (_m *Storage) PutWithChunk(ctx context.Context, name string, digest string, size int, chunkSize int, in io.ReaderAt) error {
+ ret := _m.Called(ctx, name, digest, size, chunkSize, in)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PutWithChunk")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int, int, io.ReaderAt) error); ok {
+ r0 = rf(ctx, name, digest, size, chunkSize, in)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// SeekRead provides a mock function with given fields: ctx, name, start
+func (_m *Storage) SeekRead(ctx context.Context, name string, start int64) (io.ReadCloser, error) {
+ ret := _m.Called(ctx, name, start)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SeekRead")
+ }
+
+ var r0 io.ReadCloser
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) (io.ReadCloser, error)); ok {
+ return rf(ctx, name, start)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) io.ReadCloser); ok {
+ r0 = rf(ctx, name, start)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(io.ReadCloser)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
+ r1 = rf(ctx, name, start)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewStorage creates a new instance of Storage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewStorage(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *Storage {
+ mock := &Storage{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
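The generated mock is driven with ordinary testify expectations; handler tests typically reach it through testutils.GetMockStorage and stub only the calls they need. A minimal sketch (the object key and payload are arbitrary):

```go
package mocks_test

import (
	"context"
	"io"
	"strings"
	"testing"

	"github.com/projecteru2/vmihub/internal/storage/mocks"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

func TestStorageMock(t *testing.T) {
	sto := mocks.NewStorage(t) // expectations are asserted automatically on cleanup

	// Stub Get to return a fixed payload for any context and this key.
	sto.On("Get", mock.Anything, "images/u/repo/latest").
		Return(io.NopCloser(strings.NewReader("payload")), nil)

	rc, err := sto.Get(context.Background(), "images/u/repo/latest")
	assert.Nil(t, err)
	data, err := io.ReadAll(rc)
	assert.Nil(t, err)
	assert.Equal(t, "payload", string(data))
}
```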
diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go
new file mode 100644
index 0000000..71ab3ac
--- /dev/null
+++ b/internal/storage/s3/s3.go
@@ -0,0 +1,415 @@
+package s3
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "net/http/httptest"
+ "path"
+ "strings"
+ "testing"
+
+ "github.com/projecteru2/core/log"
+ stotypes "github.com/projecteru2/vmihub/internal/storage/types"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ s3svc "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/cockroachdb/errors"
+ "github.com/johannesboyne/gofakes3"
+ "github.com/johannesboyne/gofakes3/backend/s3mem"
+ "github.com/mitchellh/mapstructure"
+ "github.com/stretchr/testify/assert"
+)
+
+type Store struct {
+ Endpoint string
+ AccessKey string
+ SecretKey string
+ Bucket string
+ BaseDir string
+ s3Client *s3svc.S3
+}
+
+type s3ReaderCloser struct {
+ store *Store
+ name string
+ offset int64
+ tempNext int64
+}
+
+func (rc *s3ReaderCloser) Read(p []byte) (n int, err error) {
+ expectedLen := len(p)
+ body, err := rc.store.readRange(context.Background(), rc.name, rc.offset+rc.tempNext, rc.offset+rc.tempNext+int64(expectedLen)-1)
+ if err != nil {
+ if strings.HasPrefix(err.Error(), "InvalidRange") {
+ return 0, io.EOF
+ }
+ return 0, err
+ }
+ temp, err := io.ReadAll(body)
+ if err != nil {
+ return 0, err
+ }
+ n = len(temp)
+ copy(p, temp)
+ rc.tempNext += int64(n)
+ return n, nil
+}
+
+func (rc *s3ReaderCloser) Close() error {
+ return nil
+}
+
+func New(endpoint string, accessKey string, secretKey string, bucket string, baseDir string, t *testing.T) (*Store, error) {
+ var (
+ s3Client *s3svc.S3
+ err error
+ )
+ if t == nil {
+ s3Client, err = newS3Client(endpoint, accessKey, secretKey)
+ } else {
+ s3Client = newMockS3Client(t, accessKey, secretKey, bucket)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return &Store{
+ Endpoint: endpoint,
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ Bucket: bucket,
+ BaseDir: baseDir,
+ s3Client: s3Client,
+ }, nil
+}
+
+func (s *Store) Get(_ context.Context, name string) (io.ReadCloser, error) {
+ resp, err := s.s3Client.GetObject(&s3svc.GetObjectInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+func (s *Store) Delete(_ context.Context, name string, ignoreNotExists bool) error {
+ if !ignoreNotExists {
+ _, err := s.s3Client.HeadObject(&s3svc.HeadObjectInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name))})
+ if err != nil {
+ return err
+ }
+ }
+ _, err := s.s3Client.DeleteObject(&s3svc.DeleteObjectInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ })
+ return err
+}
+
+func (s *Store) Put(_ context.Context, name string, digest string, in io.ReadSeeker) error {
+ _, err := s.s3Client.PutObject(&s3svc.PutObjectInput{
+ Body: in,
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ Metadata: map[string]*string{"sha256": aws.String(digest)},
+ })
+ return err
+}
+
+func (s *Store) PutWithChunk(ctx context.Context, name string, digest string, size int, chunkSize int, in io.ReaderAt) error { //nolint
+ logger := log.WithFunc("s3.PutWithChunk")
+ respInit, err := s.s3Client.CreateMultipartUpload(&s3svc.CreateMultipartUploadInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ })
+ if err != nil {
+ return err
+ }
+ uploadID := *respInit.UploadId
+ nChunks := (size + chunkSize - 1) / chunkSize
+ completes := make([]*s3svc.CompletedPart, 0, nChunks)
+ logger.Debugf(ctx, "total size: %d, nChunks: %d, chunkSize: %d", size, nChunks, chunkSize)
+ for chunkIdx := 0; chunkIdx < nChunks; chunkIdx++ {
+ offset := int64(chunkIdx * chunkSize)
+ curSize := int64(chunkSize)
+ if chunkIdx == (nChunks - 1) {
+ curSize = int64(size) - offset
+ }
+ sReader := io.NewSectionReader(in, offset, curSize)
+ logger.Debugf(ctx, "write chunk %d, size %d", chunkIdx, curSize)
+ partNum := chunkIdx + 1
+ param := &s3svc.UploadPartInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+			PartNumber:    aws.Int64(int64(partNum)), // Required: part numbers must be unique and strictly increasing
+ UploadId: aws.String(uploadID),
+ Body: sReader,
+ ContentLength: aws.Int64(curSize),
+ }
+		respChunk, err := s.s3Client.UploadPart(param)
+		if err != nil {
+			// abort the multipart upload so orphaned parts are not left behind
+			_, _ = s.s3Client.AbortMultipartUpload(&s3svc.AbortMultipartUploadInput{
+				Bucket:   aws.String(s.Bucket),
+				Key:      aws.String(path.Join(s.BaseDir, name)),
+				UploadId: aws.String(uploadID),
+			})
+			return errors.Wrapf(err, "upload part %d", partNum)
+		}
+ cp := &s3svc.CompletedPart{
+ PartNumber: aws.Int64(int64(partNum)),
+ ETag: respChunk.ETag,
+ }
+ completes = append(completes, cp)
+ }
+ _, err = s.s3Client.CompleteMultipartUpload(&s3svc.CompleteMultipartUploadInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ UploadId: aws.String(uploadID),
+ MultipartUpload: &s3svc.CompletedMultipartUpload{
+ Parts: completes,
+ },
+ })
+ return err
+}
+
+func (s *Store) readRange(_ context.Context, name string, start int64, end int64) (io.ReadCloser, error) {
+ resp, err := s.s3Client.GetObject(&s3svc.GetObjectInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ Range: aws.String(fmt.Sprintf("bytes=%d-%d", start, end)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+func (s *Store) SeekRead(_ context.Context, name string, start int64) (io.ReadCloser, error) {
+ return &s3ReaderCloser{
+ store: s,
+ name: name,
+ offset: start,
+ }, nil
+}
+
+func (s *Store) CreateChunkWrite(_ context.Context, name string) (string, error) {
+ respInit, err := s.s3Client.CreateMultipartUpload(&s3svc.CreateMultipartUploadInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ })
+ if err != nil {
+ return "", err
+ }
+ transactionID := *respInit.UploadId
+
+ return transactionID, nil
+}
+
+func (s *Store) ChunkWrite(_ context.Context, name string, transactionID string, info *stotypes.ChunkInfo) error {
+ partNum := info.Idx + 1
+
+ param := &s3svc.UploadPartInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+		PartNumber:    aws.Int64(int64(partNum)), // Required: part numbers must be unique and strictly increasing
+ UploadId: aws.String(transactionID),
+ Body: info.In,
+ ContentLength: aws.Int64(info.Size),
+ }
+	respChunk, err := s.s3Client.UploadPart(param)
+	if err != nil {
+		// abort the multipart upload so orphaned parts are not left behind
+		_, _ = s.s3Client.AbortMultipartUpload(&s3svc.AbortMultipartUploadInput{
+			Bucket:   aws.String(s.Bucket),
+			Key:      aws.String(path.Join(s.BaseDir, name)),
+			UploadId: aws.String(transactionID),
+		})
+		return err
+	}
+ var c s3svc.CompletedPart
+ c.PartNumber = aws.Int64(int64(partNum))
+ c.ETag = respChunk.ETag
+ info.Raw = c
+ return nil
+}
+
+func (s *Store) CompleteChunkWrite(
+ _ context.Context,
+ name string,
+ transactionID string,
+ chunkList []*stotypes.ChunkInfo,
+) error {
+ completes := make([]*s3svc.CompletedPart, 0, len(chunkList))
+ for _, chunk := range chunkList {
+ v := s3svc.CompletedPart{}
+ err := mapstructure.Decode(chunk.Raw, &v)
+ if err != nil {
+ return err
+ }
+ completes = append(completes, &v)
+ }
+ _, err := s.s3Client.CompleteMultipartUpload(&s3svc.CompleteMultipartUploadInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name)),
+ UploadId: aws.String(transactionID),
+ MultipartUpload: &s3svc.CompletedMultipartUpload{
+ Parts: completes,
+ },
+ })
+ return err
+}
+
+func (s *Store) Move(ctx context.Context, src, dest string) error {
+ head, err := s.s3Client.HeadObject(&s3svc.HeadObjectInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, src))})
+ if err != nil {
+ return err
+ }
+
+ objectSize := aws.Int64Value(head.ContentLength)
+	// CopyObject only handles objects smaller than 5 GB; larger objects take the multipart copy path below.
+	copyLimit := int64(5*1024*1024*1024 - 1)
+
+ if objectSize < copyLimit { //nolint
+ _, err = s.s3Client.CopyObject(&s3svc.CopyObjectInput{
+ Bucket: aws.String(s.Bucket),
+ CopySource: aws.String(path.Join(s.Bucket, s.BaseDir, src)),
+ Key: aws.String(path.Join(s.BaseDir, dest))})
+ if err != nil {
+ return err
+ }
+ } else {
+ respInit, err := s.s3Client.CreateMultipartUpload(&s3svc.CreateMultipartUploadInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, dest)),
+ })
+ if err != nil {
+ return err
+ }
+ partLimit := int64(100*1024*1024 - 1)
+ partCount := objectSize / partLimit
+ if objectSize > partCount*partLimit {
+ partCount++
+ }
+
+ completes := make([]*s3svc.CompletedPart, 0)
+ for i := int64(0); i < partCount; i++ {
+ partNumber := aws.Int64(i + 1)
+ startRange := i * partLimit
+ stopRange := (i+1)*partLimit - 1
+ if i == partCount-1 {
+ stopRange = objectSize - 1
+ }
+ respChunk, err := s.s3Client.UploadPartCopy(&s3svc.UploadPartCopyInput{
+ Bucket: aws.String(s.Bucket),
+ CopySource: aws.String(path.Join(s.Bucket, s.BaseDir, src)),
+ CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", startRange, stopRange)),
+ Key: aws.String(path.Join(s.BaseDir, dest)),
+ PartNumber: partNumber,
+ UploadId: respInit.UploadId,
+ })
+			if err != nil {
+				// abort the multipart copy so orphaned parts are not left behind
+				_, _ = s.s3Client.AbortMultipartUpload(&s3svc.AbortMultipartUploadInput{
+					Bucket:   aws.String(s.Bucket),
+					Key:      aws.String(path.Join(s.BaseDir, dest)),
+					UploadId: respInit.UploadId,
+				})
+				return err
+			}
+
+ cPart := s3svc.CompletedPart{
+ ETag: aws.String(strings.Trim(*respChunk.CopyPartResult.ETag, "\"")),
+ PartNumber: partNumber,
+ }
+ completes = append(completes, &cPart)
+ }
+ _, err = s.s3Client.CompleteMultipartUpload(&s3svc.CompleteMultipartUploadInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, dest)),
+ UploadId: respInit.UploadId,
+ MultipartUpload: &s3svc.CompletedMultipartUpload{
+ Parts: completes,
+ },
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ err = s.Delete(ctx, src, true)
+ return err
+}
+
+func (s *Store) GetSize(_ context.Context, name string) (int64, error) {
+ head, err := s.s3Client.HeadObject(&s3svc.HeadObjectInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name))})
+ if err != nil {
+ return 0, err
+ }
+ return aws.Int64Value(head.ContentLength), nil
+}
+
+func (s *Store) GetDigest(ctx context.Context, name string) (string, error) {
+ head, err := s.s3Client.HeadObject(&s3svc.HeadObjectInput{
+ Bucket: aws.String(s.Bucket),
+ Key: aws.String(path.Join(s.BaseDir, name))})
+ if err != nil {
+ return "", err
+ }
+ hashSha256, ok := head.Metadata["Sha256"]
+ if !ok {
+ hasher := sha256.New()
+ body, err := s.Get(ctx, name)
+ if err != nil {
+ return "", err
+ }
+ _, err = io.Copy(hasher, body)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%x", hasher.Sum(nil)), nil
+ }
+ return *hashSha256, nil
+}
+
+func newS3Client(endpoint string, accessKey string, secretKey string) (*s3svc.S3, error) {
+ s3ForcePathStyle := true
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{
+ Region: aws.String("default"),
+ Endpoint: &endpoint,
+ S3ForcePathStyle: &s3ForcePathStyle,
+ Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
+ },
+ })
+
+ return s3svc.New(sess), err
+}
+
+func newMockS3Client(t *testing.T, accessKey, secretKey, bucket string) *s3svc.S3 {
+ backend := s3mem.New()
+ faker := gofakes3.New(backend)
+ ts := httptest.NewServer(faker.Server())
+	// ts is intentionally not closed here: the fake S3 server must outlive the returned client for the duration of the test
+
+ // configure S3 client
+ s3Config := &aws.Config{
+ Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
+ Endpoint: aws.String(ts.URL),
+ Region: aws.String("default"),
+ DisableSSL: aws.Bool(true),
+ S3ForcePathStyle: aws.Bool(true),
+ }
+ newSession, err := session.NewSession(s3Config)
+ assert.Nil(t, err)
+
+ s3Client := s3svc.New(newSession)
+ cparams := &s3svc.CreateBucketInput{
+ Bucket: aws.String(bucket),
+ }
+ // Create a new bucket using the CreateBucket call.
+ _, err = s3Client.CreateBucket(cparams)
+ assert.Nil(t, err)
+ return s3Client
+}
diff --git a/internal/storage/s3/s3_test.go b/internal/storage/s3/s3_test.go
new file mode 100644
index 0000000..4a213ea
--- /dev/null
+++ b/internal/storage/s3/s3_test.go
@@ -0,0 +1,130 @@
+package s3
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "testing"
+
+ stotypes "github.com/projecteru2/vmihub/internal/storage/types"
+ pkgutils "github.com/projecteru2/vmihub/pkg/utils"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPut(t *testing.T) {
+ stor, err := New("", "xxxx", "yyyyyy", "eru", "images", t)
+ assert.Nil(t, err)
+
+ name := "test-put1"
+ content := []byte("hello world ")
+ digest, err := pkgutils.CalcDigestOfStr(string(content))
+ assert.Nil(t, err)
+ err = stor.Put(context.Background(), name, digest, bytes.NewReader([]byte(content)))
+ assert.Nil(t, err)
+
+ reader, err := stor.Get(context.Background(), name)
+ assert.Nil(t, err)
+ newVal, err := io.ReadAll(reader)
+ assert.Nil(t, err)
+ assert.Equal(t, string(content), string(newVal))
+}
+
+func TestPutWithChunk(t *testing.T) {
+ stor, err := New("", "xxxx", "yyyyyy", "eru", "images", t)
+ assert.Nil(t, err)
+
+ name := "test-put-with-chunk1"
+ content := bytes.Repeat([]byte("hello world "), 2048)
+ size := len(content)
+ chunkSize := 5 * 1024 * 1024
+ digest, err := pkgutils.CalcDigestOfStr(string(content))
+ assert.Nil(t, err)
+ err = stor.PutWithChunk(context.Background(), name, digest, size, chunkSize, bytes.NewReader([]byte(content)))
+ assert.Nil(t, err)
+
+ reader, err := stor.Get(context.Background(), name)
+ assert.Nil(t, err)
+ newVal, err := io.ReadAll(reader)
+ assert.Nil(t, err)
+ assert.Equal(t, string(content), string(newVal))
+}
+
+func TestChunkUpload(t *testing.T) {
+ stor, err := New("", "xxxx", "yyyyyy", "eru", "images", t)
+ assert.Nil(t, err)
+
+ chunkSize := 5 * 1024 * 1024
+ totalSize := 13 * 1024 * 1024
+ nChunks := 3
+
+ ctx := context.Background()
+ name := "test-chunk-upload1"
+ content := bytes.Repeat([]byte{'c'}, totalSize)
+ tID, err := stor.CreateChunkWrite(context.Background(), name)
+ assert.Nil(t, err)
+ assert.NotEmpty(t, tID)
+
+ ciInfoList := make([]*stotypes.ChunkInfo, 0, nChunks)
+ for idx := 0; idx < nChunks; idx++ {
+ start := idx * chunkSize
+ end := (idx + 1) * chunkSize
+ if end > totalSize {
+ end = totalSize
+ }
+ reader := bytes.NewReader(content[start:end])
+ ciInfo := &stotypes.ChunkInfo{
+ Idx: int(idx),
+ Size: int64(end - start),
+ ChunkSize: int64(chunkSize),
+ Digest: "",
+ In: reader,
+ }
+ ciInfoList = append(ciInfoList, ciInfo)
+ err := stor.ChunkWrite(context.Background(), name, tID, ciInfo)
+ assert.Nil(t, err)
+ }
+ err = stor.CompleteChunkWrite(ctx, name, tID, ciInfoList)
+ assert.Nil(t, err)
+
+ reader, err := stor.Get(context.Background(), name)
+ assert.Nil(t, err)
+ newVal, err := io.ReadAll(reader)
+ assert.Nil(t, err)
+ assert.Equal(t, string(content), string(newVal))
+}
+
+func TestChunkDownload(t *testing.T) {
+ stor, err := New("", "xxxx", "yyyyyy", "eru", "images", t)
+ assert.Nil(t, err)
+
+ chunkSize := 5 * 1024 * 1024
+ totalSize := 13 * 1024 * 1024
+ nChunks := 3
+
+ name := "test-chunk-download1"
+ content := bytes.Repeat([]byte{'c'}, totalSize)
+ digest, err := pkgutils.CalcDigestOfStr(string(content))
+ assert.Nil(t, err)
+ err = stor.Put(context.Background(), name, digest, bytes.NewReader(content))
+ assert.Nil(t, err)
+
+ reader, err := stor.Get(context.Background(), name)
+ assert.Nil(t, err)
+ newVal, err := io.ReadAll(reader)
+ assert.Nil(t, err)
+ assert.Equal(t, string(content), string(newVal))
+
+ ckNewVal := make([]byte, 0, totalSize)
+
+ for idx := 0; idx < nChunks; idx++ {
+ rc, err := stor.SeekRead(context.Background(), name, int64(idx*chunkSize))
+ assert.Nil(t, err)
+ reader := io.LimitReader(rc, int64(chunkSize))
+ buf, err := io.ReadAll(reader)
+ assert.True(t, err == nil || err == io.EOF)
+ assert.LessOrEqual(t, len(buf), int(chunkSize))
+ assert.Equal(t, content[0], buf[0])
+ ckNewVal = append(ckNewVal, buf...)
+ }
+ assert.Equal(t, string(content), string(ckNewVal))
+}
diff --git a/internal/storage/storage.go b/internal/storage/storage.go
new file mode 100644
index 0000000..8dcca74
--- /dev/null
+++ b/internal/storage/storage.go
@@ -0,0 +1,22 @@
+package storage
+
+import (
+ "context"
+ "io"
+
+ stotypes "github.com/projecteru2/vmihub/internal/storage/types"
+)
+
+type Storage interface { //nolint:interfacebloat
+ Get(ctx context.Context, name string) (io.ReadCloser, error)
+ Delete(ctx context.Context, name string, ignoreNotExists bool) error
+ Put(ctx context.Context, name string, digest string, in io.ReadSeeker) error
+ PutWithChunk(ctx context.Context, name string, digest string, size int, chunkSize int, in io.ReaderAt) error
+ SeekRead(ctx context.Context, name string, start int64) (io.ReadCloser, error)
+ CreateChunkWrite(ctx context.Context, name string) (string, error)
+ ChunkWrite(ctx context.Context, name string, transactionID string, info *stotypes.ChunkInfo) error
+ CompleteChunkWrite(ctx context.Context, name string, transactionID string, chunkList []*stotypes.ChunkInfo) error
+ Move(ctx context.Context, src, dest string) error
+ GetSize(ctx context.Context, name string) (int64, error)
+ GetDigest(ctx context.Context, name string) (string, error)
+}
diff --git a/internal/storage/types/types.go b/internal/storage/types/types.go
new file mode 100644
index 0000000..298bb2b
--- /dev/null
+++ b/internal/storage/types/types.go
@@ -0,0 +1,23 @@
+package types
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type ChunkInfo struct {
+ Idx int `json:"idx"`
+ Size int64 `json:"size"`
+ ChunkSize int64 `json:"chunk_size"`
+ Digest string `json:"digest"`
+ In io.ReadSeeker `json:"-"`
+ Raw any `json:"raw"`
+}
+
+func (ci *ChunkInfo) MarshalBinary() (data []byte, err error) {
+ return json.Marshal(ci)
+}
+
+func (ci *ChunkInfo) UnmarshalBinary(data []byte) error {
+ return json.Unmarshal(data, ci)
+}
diff --git a/internal/testutils/utils.go b/internal/testutils/utils.go
new file mode 100644
index 0000000..70e7c6a
--- /dev/null
+++ b/internal/testutils/utils.go
@@ -0,0 +1,92 @@
+package testutils
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/cockroachdb/errors"
+ "github.com/gin-contrib/sessions"
+ "github.com/gin-gonic/gin"
+ "github.com/projecteru2/core/log"
+ "github.com/projecteru2/core/types"
+ "github.com/projecteru2/vmihub/config"
+ "github.com/projecteru2/vmihub/internal/middlewares"
+ "github.com/projecteru2/vmihub/internal/models"
+ storFact "github.com/projecteru2/vmihub/internal/storage/factory"
+ storageMocks "github.com/projecteru2/vmihub/internal/storage/mocks"
+ "github.com/projecteru2/vmihub/internal/utils"
+ "github.com/projecteru2/vmihub/internal/utils/redissession"
+)
+
+func Prepare(ctx context.Context, t *testing.T) error {
+ cfg, err := config.LoadTestConfig()
+ if err != nil {
+ return err
+ }
+ logCfg := &types.ServerLogConfig{
+ Level: cfg.Log.Level,
+ UseJSON: cfg.Log.UseJSON,
+ Filename: cfg.Log.Filename,
+ MaxSize: cfg.Log.MaxSize,
+ MaxAge: cfg.Log.MaxAge,
+ MaxBackups: cfg.Log.MaxBackups,
+ }
+ if err := log.SetupLog(ctx, logCfg, cfg.Log.SentryDSN); err != nil {
+ return errors.Newf("Can't initialize log: %v\n", err)
+ }
+ if err := models.Init(&cfg.Mysql, t); err != nil {
+ return err
+ }
+ cfg.Storage.Type = "mock"
+ if _, err := storFact.Init(&cfg.Storage); err != nil {
+ return err
+ }
+
+ utils.SetupRedis(&cfg.Redis, t)
+ return nil
+}
+
+func PrepareGinEngine() (*gin.Engine, error) {
+ r := gin.New()
+ redisCli := utils.GetRedisConn()
+ sessStor, err := redissession.NewStore(context.TODO(), redisCli)
+ if err != nil {
+ return nil, err
+ }
+
+ r.Use(sessions.Sessions("mysession", sessStor))
+ r.Use(middlewares.Cors())
+ r.Use(middlewares.Logger("vmihub"))
+ return r, nil
+}
+
+func PrepareUserData(username, passwd string) error {
+	ePasswd, err := utils.EncryptPassword(passwd)
+	if err != nil {
+		return err
+	}
+
+	tblName := ((*models.User)(nil)).TableName()
+	columns := ((*models.User)(nil)).ColumnNames()
+
+	wantRows := sqlmock.NewRows([]string{"id", "username", "password"}).
+		AddRow(1, username, ePasswd)
+	models.Mock.ExpectQuery(fmt.Sprintf("SELECT %s FROM %s WHERE username = ?", columns, tblName)).
+ WithArgs(username).
+ WillReturnRows(wantRows)
+ return nil
+}
+
+func GetMockStorage() *storageMocks.Storage {
+ sto := storFact.Instance()
+ return sto.(*storageMocks.Storage)
+}
+
+func AddAuth(req *http.Request, username, password string) {
+ val := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+ req.Header.Set("Authorization", fmt.Sprintf("Basic %s", val))
+}
diff --git a/internal/utils/constant.go b/internal/utils/constant.go
new file mode 100644
index 0000000..352ab32
--- /dev/null
+++ b/internal/utils/constant.go
@@ -0,0 +1,9 @@
+package utils
+
+const (
+ NameRegex = `^[a-z0-9]+(?:[._-][a-z0-9]+)*$`
+ KB = 1024
+ MB = 1024 * KB
+ GB = 1024 * MB
+ TB = 1024 * GB
+)
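NameRegex restricts repository and user names to lowercase alphanumerics joined by single `.`, `_` or `-` separators. A quick check might look like the sketch below; validateName is a hypothetical helper, not part of this change:

```go
package utils

import "regexp"

var nameRe = regexp.MustCompile(NameRegex)

// validateName reports whether a repository or user name is acceptable.
// "ubuntu-22.04" matches; "Ubuntu", "-foo" and "a--b" do not.
func validateName(name string) bool {
	return nameRe.MatchString(name)
}
```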
diff --git a/internal/utils/idgen/idgen.go b/internal/utils/idgen/idgen.go
new file mode 100644
index 0000000..3f324bc
--- /dev/null
+++ b/internal/utils/idgen/idgen.go
@@ -0,0 +1,10 @@
+package idgen
+
+import (
+ "github.com/btcsuite/btcutil/base58"
+)
+
+func NextSID() string {
+ id := NewObjectID()
+ return base58.Encode(id[:])
+}
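NextSID base58-encodes a fresh 12-byte ObjectID, producing short, roughly time-ordered string IDs. A small, purely illustrative sketch of generating one and recovering the underlying hex:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/btcsuite/btcutil/base58"
	"github.com/projecteru2/vmihub/internal/utils/idgen"
)

func main() {
	sid := idgen.NextSID() // short base58 string, e.g. "6y5v..."
	fmt.Println(sid)

	// The original 12 ObjectID bytes can be recovered from the base58 string.
	raw := base58.Decode(sid)
	fmt.Println(hex.EncodeToString(raw)) // 24 hex characters
}
```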
diff --git a/internal/utils/idgen/objectid.go b/internal/utils/idgen/objectid.go
new file mode 100644
index 0000000..f86fd53
--- /dev/null
+++ b/internal/utils/idgen/objectid.go
@@ -0,0 +1,206 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package idgen
+
+import (
+ "crypto/rand"
+ "encoding"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "sync/atomic"
+ "time"
+)
+
+// ErrInvalidHex indicates that a hex string cannot be converted to an ObjectID.
+var ErrInvalidHex = errors.New("the provided hex string is not a valid ObjectID")
+
+// ObjectID is the BSON ObjectID type.
+type ObjectID [12]byte
+
+// NilObjectID is the zero value for ObjectID.
+var NilObjectID ObjectID
+
+var objectIDCounter = readRandomUint32()
+var processUnique = processUniqueBytes()
+
+var _ encoding.TextMarshaler = ObjectID{}
+var _ encoding.TextUnmarshaler = &ObjectID{}
+
+// NewObjectID generates a new ObjectID.
+func NewObjectID() ObjectID {
+ return NewObjectIDFromTimestamp(time.Now())
+}
+
+// NewObjectIDFromTimestamp generates a new ObjectID based on the given time.
+func NewObjectIDFromTimestamp(timestamp time.Time) ObjectID {
+ var b [12]byte
+
+ binary.BigEndian.PutUint32(b[0:4], uint32(timestamp.Unix()))
+ copy(b[4:9], processUnique[:])
+ putUint24(b[9:12], atomic.AddUint32(&objectIDCounter, 1))
+
+ return b
+}
+
+// Timestamp extracts the time part of the ObjectId.
+func (id ObjectID) Timestamp() time.Time {
+ unixSecs := binary.BigEndian.Uint32(id[0:4])
+ return time.Unix(int64(unixSecs), 0).UTC()
+}
+
+// Hex returns the hex encoding of the ObjectID as a string.
+func (id ObjectID) Hex() string {
+ var buf [24]byte
+ hex.Encode(buf[:], id[:])
+ return string(buf[:])
+}
+
+func (id ObjectID) String() string {
+ return fmt.Sprintf("ObjectID(%q)", id.Hex())
+}
+
+// IsZero returns true if id is the empty ObjectID.
+func (id ObjectID) IsZero() bool {
+ return id == NilObjectID
+}
+
+// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
+// valid ObjectID.
+func ObjectIDFromHex(s string) (ObjectID, error) {
+ if len(s) != 24 {
+ return NilObjectID, ErrInvalidHex
+ }
+
+ var oid [12]byte
+ _, err := hex.Decode(oid[:], []byte(s))
+ if err != nil {
+ return NilObjectID, err
+ }
+
+ return oid, nil
+}
+
+// IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not.
+//
+// Deprecated: Use ObjectIDFromHex and check the error instead.
+func IsValidObjectID(s string) bool {
+ _, err := ObjectIDFromHex(s)
+ return err == nil
+}
+
+// MarshalText returns the ObjectID as UTF-8-encoded text. Implementing this allows us to use ObjectID
+// as a map key when marshaling JSON. See https://pkg.go.dev/encoding#TextMarshaler
+func (id ObjectID) MarshalText() ([]byte, error) {
+ return []byte(id.Hex()), nil
+}
+
+// UnmarshalText populates the byte slice with the ObjectID. Implementing this allows us to use ObjectID
+// as a map key when unmarshalling JSON. See https://pkg.go.dev/encoding#TextUnmarshaler
+func (id *ObjectID) UnmarshalText(b []byte) error {
+ oid, err := ObjectIDFromHex(string(b))
+ if err != nil {
+ return err
+ }
+ *id = oid
+ return nil
+}
+
+// MarshalJSON returns the ObjectID as a string
+func (id ObjectID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(id.Hex())
+}
+
+// UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 24 bytes long, it
+// will be populated with the hex representation of the ObjectID. If the byte slice is twelve bytes
+// long, it will be populated with the BSON representation of the ObjectID. This method also accepts empty strings and
+// decodes them as NilObjectID. For any other inputs, an error will be returned.
+func (id *ObjectID) UnmarshalJSON(b []byte) error {
+ // Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer ObjectID field
+ // will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not
+ // enter the UnmarshalJSON hook.
+ if string(b) == "null" {
+ return nil
+ }
+
+ var err error
+ switch len(b) {
+ case 12:
+ copy(id[:], b)
+ default:
+ // Extended JSON
+ var res any
+ err := json.Unmarshal(b, &res)
+ if err != nil {
+ return err
+ }
+ str, ok := res.(string)
+ if !ok {
+ m, ok := res.(map[string]any)
+ if !ok {
+ return errors.New("not an extended JSON ObjectID")
+ }
+ oid, ok := m["$oid"]
+ if !ok {
+ return errors.New("not an extended JSON ObjectID")
+ }
+ str, ok = oid.(string)
+ if !ok {
+ return errors.New("not an extended JSON ObjectID")
+ }
+ }
+
+ // An empty string is not a valid ObjectID, but we treat it as a special value that decodes as NilObjectID.
+ if len(str) == 0 {
+ copy(id[:], NilObjectID[:])
+ return nil
+ }
+
+ if len(str) != 24 {
+ return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str))
+ }
+
+ _, err = hex.Decode(id[:], []byte(str))
+ if err != nil {
+ return err
+ }
+ }
+
+ return err
+}
+
+func processUniqueBytes() [5]byte {
+ var b [5]byte
+ _, err := io.ReadFull(rand.Reader, b[:])
+ if err != nil {
+ panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err))
+ }
+
+ return b
+}
+
+func readRandomUint32() uint32 {
+ var b [4]byte
+ _, err := io.ReadFull(rand.Reader, b[:])
+ if err != nil {
+ panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err))
+ }
+
+ return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+}
+
+func putUint24(b []byte, v uint32) {
+ b[0] = byte(v >> 16)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v)
+}
diff --git a/internal/utils/idgen/objectid_test.go b/internal/utils/idgen/objectid_test.go
new file mode 100644
index 0000000..365bcd2
--- /dev/null
+++ b/internal/utils/idgen/objectid_test.go
@@ -0,0 +1,268 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package idgen
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestString(t *testing.T) {
+ id := NewObjectID()
+ require.Contains(t, id.String(), id.Hex())
+}
+
+func BenchmarkHex(b *testing.B) {
+ id := NewObjectID()
+ for i := 0; i < b.N; i++ {
+ id.Hex()
+ }
+}
+
+func BenchmarkObjectIDFromHex(b *testing.B) {
+ id := NewObjectID().Hex()
+ for i := 0; i < b.N; i++ {
+ _, _ = ObjectIDFromHex(id)
+ }
+}
+
+func BenchmarkNewObjectIDFromTimestamp(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ timestamp := time.Now().Add(time.Duration(i) * time.Millisecond)
+ _ = NewObjectIDFromTimestamp(timestamp)
+ }
+}
+
+func TestFromHex_RoundTrip(t *testing.T) {
+ before := NewObjectID()
+ after, err := ObjectIDFromHex(before.Hex())
+ require.NoError(t, err)
+
+ require.Equal(t, before, after)
+}
+
+func TestFromHex_InvalidHex(t *testing.T) {
+ _, err := ObjectIDFromHex("this is not a valid hex string!")
+ require.Error(t, err)
+}
+
+func TestFromHex_WrongLength(t *testing.T) {
+ _, err := ObjectIDFromHex("deadbeef")
+ require.Equal(t, ErrInvalidHex, err)
+}
+
+func TestIsValidObjectID(t *testing.T) {
+ testCases := []struct {
+ givenID string
+ expected bool
+ }{
+ {
+ givenID: "5ef7fdd91c19e3222b41b839",
+ expected: true,
+ },
+ {
+ givenID: "5ef7fdd91c19e3222b41b83",
+ expected: false,
+ },
+ }
+
+ for _, testcase := range testCases {
+ got := IsValidObjectID(testcase.givenID)
+ assert.Equal(t, testcase.expected, got, "expected hex string to be valid ObjectID: %v, got %v", testcase.expected, got)
+ }
+}
+
+func TestTimeStamp(t *testing.T) {
+ testCases := []struct {
+ Hex string
+ Expected string
+ }{
+ {
+ "000000001111111111111111",
+ "1970-01-01 00:00:00 +0000 UTC",
+ },
+ {
+ "7FFFFFFF1111111111111111",
+ "2038-01-19 03:14:07 +0000 UTC",
+ },
+ {
+ "800000001111111111111111",
+ "2038-01-19 03:14:08 +0000 UTC",
+ },
+ {
+ "FFFFFFFF1111111111111111",
+ "2106-02-07 06:28:15 +0000 UTC",
+ },
+ }
+
+ for _, testcase := range testCases {
+ id, err := ObjectIDFromHex(testcase.Hex)
+ require.NoError(t, err)
+ secs := int64(binary.BigEndian.Uint32(id[0:4]))
+ timestamp := time.Unix(secs, 0).UTC()
+ require.Equal(t, testcase.Expected, timestamp.String())
+ }
+}
+
+func TestCreateFromTime(t *testing.T) {
+ testCases := []struct {
+ time string
+ Expected string
+ }{
+ {
+ "1970-01-01T00:00:00.000Z",
+ "00000000",
+ },
+ {
+ "2038-01-19T03:14:07.000Z",
+ "7fffffff",
+ },
+ {
+ "2038-01-19T03:14:08.000Z",
+ "80000000",
+ },
+ {
+ "2106-02-07T06:28:15.000Z",
+ "ffffffff",
+ },
+ }
+
+ layout := "2006-01-02T15:04:05.000Z"
+ for _, testcase := range testCases {
+ time, err := time.Parse(layout, testcase.time)
+ require.NoError(t, err)
+
+ id := NewObjectIDFromTimestamp(time)
+ timeStr := hex.EncodeToString(id[0:4])
+
+ require.Equal(t, testcase.Expected, timeStr)
+ }
+}
+
+func TestGenerationTime(t *testing.T) {
+ testCases := []struct {
+ hex string
+ Expected string
+ }{
+ {
+ "000000001111111111111111",
+ "1970-01-01 00:00:00 +0000 UTC",
+ },
+ {
+ "7FFFFFFF1111111111111111",
+ "2038-01-19 03:14:07 +0000 UTC",
+ },
+ {
+ "800000001111111111111111",
+ "2038-01-19 03:14:08 +0000 UTC",
+ },
+ {
+ "FFFFFFFF1111111111111111",
+ "2106-02-07 06:28:15 +0000 UTC",
+ },
+ }
+
+ for _, testcase := range testCases {
+ id, err := ObjectIDFromHex(testcase.hex)
+ require.NoError(t, err)
+
+ genTime := id.Timestamp()
+ require.Equal(t, testcase.Expected, genTime.String())
+ }
+}
+
+func TestCounterOverflow(t *testing.T) {
+ objectIDCounter = 0xFFFFFFFF
+ NewObjectID()
+ require.Equal(t, uint32(0), objectIDCounter)
+}
+
+func TestObjectID_MarshalJSONMap(t *testing.T) {
+ type mapOID struct {
+ Map map[ObjectID]string
+ }
+
+ oid := NewObjectID()
+ expectedJSON := []byte(fmt.Sprintf(`{"Map":{%q:"foo"}}`, oid.Hex()))
+ data := mapOID{
+ Map: map[ObjectID]string{oid: "foo"},
+ }
+
+ out, err := json.Marshal(&data)
+ require.NoError(t, err)
+ require.Equal(t, expectedJSON, out)
+}
+
+func TestObjectID_UnmarshalJSONMap(t *testing.T) {
+ type mapOID struct {
+ Map map[ObjectID]string
+ }
+ oid := NewObjectID()
+ mapOIDJSON := []byte(fmt.Sprintf(`{"Map":{%q:"foo"}}`, oid.Hex()))
+ expectedData := mapOID{
+ Map: map[ObjectID]string{oid: "foo"},
+ }
+
+ data := mapOID{}
+ err := json.Unmarshal(mapOIDJSON, &data)
+ require.NoError(t, err)
+ require.Equal(t, expectedData, data)
+}
+
+func TestObjectID_UnmarshalJSON(t *testing.T) {
+ oid := NewObjectID()
+
+ hexJSON := fmt.Sprintf(`{"foo": %q}`, oid.Hex())
+ extJSON := fmt.Sprintf(`{"foo": {"$oid": %q}}`, oid.Hex())
+ emptyStringJSON := `{"foo": ""}`
+ nullJSON := `{"foo": null}`
+
+ testCases := []struct {
+ name string
+ jsonString string
+ expected ObjectID
+ }{
+ {"hex bytes", hexJSON, oid},
+ {"extended JSON", extJSON, oid},
+ {"empty string", emptyStringJSON, NilObjectID},
+ {"null", nullJSON, NilObjectID},
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var got map[string]ObjectID
+ err := json.Unmarshal([]byte(tc.jsonString), &got)
+ assert.Nil(t, err, "Unmarshal error: %v", err)
+
+ gotOid := got["foo"]
+ assert.Equal(t, tc.expected, gotOid, "expected ObjectID %s, got %s", tc.expected, gotOid)
+ })
+ }
+}
+
+func TestObjectID_MarshalText(t *testing.T) {
+ oid := ObjectID{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB}
+ b, err := oid.MarshalText()
+ assert.Nil(t, err, "MarshalText error: %v", err)
+ want := "000102030405060708090a0b"
+ got := string(b)
+ assert.Equal(t, want, got, "want %v, got %v", want, got)
+}
+
+func TestObjectID_UnmarshalText(t *testing.T) {
+ var oid ObjectID
+ err := oid.UnmarshalText([]byte("000102030405060708090a0b"))
+ assert.Nil(t, err, "UnmarshalText error: %v", err)
+ want := ObjectID{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB}
+ assert.Equal(t, want, oid, "want %v, got %v", want, oid)
+}
diff --git a/internal/utils/image.go b/internal/utils/image.go
new file mode 100644
index 0000000..441fbb8
--- /dev/null
+++ b/internal/utils/image.go
@@ -0,0 +1,21 @@
+package utils
+
+import "strings"
+
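+// FakeTag is the placeholder tag used when an image has neither a real tag nor a digest.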
+const (
+ FakeTag = "0000000000"
+)
+
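+// IsDefaultTag reports whether tag is empty or "latest".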
+func IsDefaultTag(tag string) bool {
+ return tag == "" || tag == "latest"
+}
+
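+// NormalizeTag replaces a default tag with the first 10 hex characters of the digest, or FakeTag when no digest is given.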
+func NormalizeTag(tag string, digest string) string {
+ if IsDefaultTag(tag) {
+ if digest == "" {
+ return FakeTag
+ }
+ return strings.TrimPrefix(digest, "sha256:")[0:10]
+ }
+ return tag
+}
diff --git a/internal/utils/redis.go b/internal/utils/redis.go
new file mode 100644
index 0000000..58d0c53
--- /dev/null
+++ b/internal/utils/redis.go
@@ -0,0 +1,99 @@
+package utils
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/alicebob/miniredis/v2"
+ "github.com/go-redsync/redsync/v4"
+ "github.com/go-redsync/redsync/v4/redis/goredis/v9"
+ "github.com/projecteru2/vmihub/config"
+ "github.com/redis/go-redis/v9"
+)
+
+var (
+ cli *redis.Client
+ MockRedis *miniredis.Miniredis
+ rs *redsync.Redsync
+)
+
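+// NewRedisCient builds a redis client from the config; when sentinel addresses are configured it returns a failover client.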
+func NewRedisCient(cfg *config.RedisConfig) (ans *redis.Client) {
+
+ if len(cfg.SentinelAddrs) > 0 {
+ ans = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: cfg.MasterName,
+ SentinelAddrs: cfg.SentinelAddrs,
+ DB: cfg.DB,
+ Username: cfg.Username,
+ Password: cfg.Password,
+ })
+ } else {
+ ans = redis.NewClient(&redis.Options{
+ Addr: cfg.Addr,
+ DB: cfg.DB,
+ Username: cfg.Username,
+ Password: cfg.Password,
+ })
+ }
+ return
+}
+
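+// SetupRedis initializes the package-level redis client and redsync instance; pass a non-nil *testing.T to back it with an in-memory miniredis.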
+func SetupRedis(cfg *config.RedisConfig, t *testing.T) {
+ if t != nil {
+ MockRedis = miniredis.RunT(t)
+ cli = redis.NewClient(&redis.Options{
+ Addr: MockRedis.Addr(), // Redis server address
+ })
+ } else {
+ cli = NewRedisCient(cfg)
+ }
+ rs = redsync.New(goredis.NewPool(cli))
+}
+
+func GetRedisConn() *redis.Client {
+ return cli
+}
+
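+// SetObjToRedis JSON-encodes obj and stores it under key k with the given expiration.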
+func SetObjToRedis(ctx context.Context, k string, obj any, expiration time.Duration) error {
+ bs, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+ return cli.Set(ctx, k, bs, expiration).Err()
+}
+
+func DeleteObjectsInRedis(ctx context.Context, keys ...string) error {
+ return cli.Del(ctx, keys...).Err()
+}
+
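+// GetObjFromRedis loads the value stored under key k and JSON-decodes it into obj.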
+func GetObjFromRedis(ctx context.Context, k string, obj any) error {
+ v, err := cli.Get(ctx, k).Result()
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal([]byte(v), obj)
+}
+
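+// NewRedisMutex creates a redsync distributed mutex with the given name and expiry.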
+func NewRedisMutex(name string, expiry time.Duration) *redsync.Mutex {
+ return rs.NewMutex(name, redsync.WithExpiry(expiry))
+}
+
+func CleanRedisMutex(name string) error {
+ return cli.Del(context.TODO(), name).Err()
+}
+
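+// LockRedisKey acquires a distributed lock on key and returns an unlock function that retries the release up to 3 times.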
+func LockRedisKey(ctx context.Context, key string, expiry time.Duration) (func(), error) {
+ mtx := NewRedisMutex(key, expiry)
+ if err := mtx.LockContext(ctx); err != nil {
+ return nil, err
+ }
+ return func() {
+ retryTask := NewRetryTask(ctx, 3, func() error {
+ _, err := mtx.Unlock()
+ return err
+ })
+ _ = retryTask.Run(ctx)
+ }, nil
+}
diff --git a/internal/utils/redis_test.go b/internal/utils/redis_test.go
new file mode 100644
index 0000000..eb6bc45
--- /dev/null
+++ b/internal/utils/redis_test.go
@@ -0,0 +1,75 @@
+package utils
+
+import (
+ "context"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRedis(t *testing.T) {
+ SetupRedis(nil, t)
+ c := GetRedisConn()
+ s := MockRedis
+ ans := c.Set(context.Background(), "foo", "bar", 0)
+ assert.Nil(t, ans.Err())
+
+ // Optionally check values in redis...
+ // got, err := s.Get("foo")
+ // assert.Nil(t, err)
+ // assert.Equal(t, got, "bar")
+
+ // // ... or use a helper for that:
+ // s.CheckGet(t, "foo", "bar")
+ {
+ v, err := c.Get(context.Background(), "foo").Result()
+ assert.Nil(t, err)
+ assert.Equal(t, "bar", v)
+
+ err = c.Set(context.Background(), "foo1", 1234, 0).Err()
+ assert.Nil(t, err)
+ v, err = c.Get(context.Background(), "foo1").Result()
+ assert.Nil(t, err)
+ iv, err := strconv.Atoi(v)
+ assert.Nil(t, err)
+ assert.Equal(t, 1234, iv)
+
+ err = c.HSet(context.Background(), "foo2", 1, "1", 2, "2").Err()
+ assert.Nil(t, err)
+ kv, err := c.HGetAll(context.Background(), "foo2").Result()
+ assert.Nil(t, err)
+ for k, v := range kv {
+ assert.Equal(t, k, v)
+ }
+ }
+ {
+ hsetAns := c.HSet(context.Background(), "hkey", "info", 1, "slices", "{}", "bool", strconv.FormatBool(true))
+ assert.Nil(t, hsetAns.Err())
+ hgetAns := c.HGetAll(context.Background(), "hkey")
+ assert.Nil(t, hgetAns.Err())
+ assert.Len(t, hgetAns.Val(), 3)
+ for k, v := range hgetAns.Val() {
+ switch k {
+ case "info":
+ assert.Equal(t, "1", v)
+ case "slices":
+ assert.Equal(t, "{}", v)
+ case "bool":
+ assert.Equal(t, "true", v)
+ val, err := strconv.ParseBool(v)
+ assert.Nil(t, err)
+ assert.True(t, val)
+ default:
+ assert.Failf(t, "unexpected key", "invalid key %s", k)
+ }
+ }
+ }
+
+ // TTL and expiration:
+ s.Set("foo", "bar")
+ s.SetTTL("foo", 10*time.Second)
+ s.FastForward(11 * time.Second)
+ assert.False(t, s.Exists("foo"))
+}
diff --git a/internal/utils/redissession/redis.go b/internal/utils/redissession/redis.go
new file mode 100644
index 0000000..4ee8e8b
--- /dev/null
+++ b/internal/utils/redissession/redis.go
@@ -0,0 +1,55 @@
+package redissession
+
+import (
+ "context"
+ "errors"
+
+ ginsessions "github.com/gin-contrib/sessions"
+ "github.com/rbcervilla/redisstore/v9"
+ "github.com/redis/go-redis/v9"
+)
+
+type Store interface {
+ ginsessions.Store
+}
+
+// NewStore - create new session store with given redis client interface
+func NewStore(ctx context.Context, client redis.UniversalClient) (ginsessions.Store, error) {
+ innerStore, err := redisstore.NewRedisStore(ctx, client)
+ if err != nil {
+ return nil, err
+ }
+ return &store{innerStore}, nil
+}
+
+type store struct {
+ *redisstore.RedisStore
+}
+
+// GetRedisStore returns the actual working store.
+// Ref: https://godoc.org/github.com/boj/redistore#RediStore
+func GetRedisStore(s Store) (rediStore *redisstore.RedisStore, err error) {
+ realStore, ok := s.(*store)
+ if !ok {
+ err = errors.New("unable to get the redis store: Store isn't *store")
+ return
+ }
+
+ rediStore = realStore.RedisStore
+ return
+}
+
+// SetKeyPrefix sets the key prefix in the redis database.
+func SetKeyPrefix(s Store, prefix string) error {
+ rediStore, err := GetRedisStore(s)
+ if err != nil {
+ return err
+ }
+
+ rediStore.KeyPrefix(prefix)
+ return nil
+}
+
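+// Options applies gin session options to the underlying RedisStore by converting them to gorilla options.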
+func (c *store) Options(options ginsessions.Options) {
+ c.RedisStore.Options(*options.ToGorillaOptions())
+}
diff --git a/internal/utils/retry.go b/internal/utils/retry.go
new file mode 100644
index 0000000..b34548b
--- /dev/null
+++ b/internal/utils/retry.go
@@ -0,0 +1,72 @@
+package utils
+
+import (
+ "context"
+ "time"
+
+ "github.com/projecteru2/core/log"
+)
+
+// RetryTask .
+type RetryTask struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ Func func() error
+ MaxAttempts int
+}
+
+// Run start running retry task
+func (r *RetryTask) Run(ctx context.Context) error {
+ logger := log.WithFunc("Run")
+ logger.Debug(ctx, "start")
+ defer r.Stop(ctx)
+
+ var err error
+ interval := 1
+ timer := time.NewTimer(0)
+ defer timer.Stop()
+
+ for i := 0; i < r.MaxAttempts; i++ {
+ select {
+ case <-r.ctx.Done():
+ logger.Debug(ctx, "abort")
+ return r.ctx.Err()
+ case <-timer.C:
+ err = r.Func()
+ if err == nil {
+ return nil
+ }
+ logger.Debugf(ctx, "will retry after %v seconds", interval)
+ timer.Reset(time.Duration(interval) * time.Second)
+ interval *= 2
+ }
+ }
+ return err
+}
+
+// Stop stops running task
+func (r *RetryTask) Stop(context.Context) {
+ r.cancel()
+}
+
+// NewRetryTask .
+func NewRetryTask(ctx context.Context, maxAttempts int, f func() error) *RetryTask {
+ // make sure to execute at least once
+ if maxAttempts < 1 {
+ maxAttempts = 1
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ return &RetryTask{
+ ctx: ctx,
+ cancel: cancel,
+ MaxAttempts: maxAttempts,
+ Func: f,
+ }
+}
+
+// BackoffRetry retries up to `maxAttempts` times, and the interval will grow exponentially
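+// For example, BackoffRetry(ctx, 5, ping) runs the (hypothetical) ping func immediately, then waits 1s, 2s, 4s, ... between retries.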
+func BackoffRetry(ctx context.Context, maxAttempts int, f func() error) error {
+ retryTask := NewRetryTask(ctx, maxAttempts, f)
+ defer retryTask.Stop(ctx)
+ return retryTask.Run(ctx)
+}
diff --git a/internal/utils/retry_test.go b/internal/utils/retry_test.go
new file mode 100644
index 0000000..4a000df
--- /dev/null
+++ b/internal/utils/retry_test.go
@@ -0,0 +1,45 @@
+package utils
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBackoffRetry(t *testing.T) {
+ var errNotSuccess = errors.New("not success")
+ i := 0
+ f := func() error {
+ i++
+ if i < 4 {
+ return errNotSuccess
+ }
+ return nil
+ }
+ assert.Nil(t, BackoffRetry(context.Background(), 10, f))
+ assert.Equal(t, 4, i)
+
+ i = 0
+ assert.Equal(t, errNotSuccess, BackoffRetry(context.Background(), 0, f))
+ assert.Equal(t, 1, i)
+}
+
+func TestBackoffRetryWithCancel(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ var errNotSuccess = errors.New("not success")
+ i := 0
+ f := func() error {
+ i++
+ if i < 4 {
+ return errNotSuccess
+ }
+ return nil
+ }
+ assert.Equal(t, context.DeadlineExceeded, BackoffRetry(ctx, 10, f))
+ assert.NotEqual(t, 4, i)
+}
diff --git a/internal/utils/sh/mock.go b/internal/utils/sh/mock.go
new file mode 100644
index 0000000..0d37254
--- /dev/null
+++ b/internal/utils/sh/mock.go
@@ -0,0 +1,12 @@
+package sh
+
+// NewMockShell .
+func NewMockShell(s Shell) func() {
+ var old = shell
+
+ shell = s
+
+ return func() {
+ shell = old
+ }
+}
diff --git a/internal/utils/sh/mocks/Shell.go b/internal/utils/sh/mocks/Shell.go
new file mode 100644
index 0000000..d8548eb
--- /dev/null
+++ b/internal/utils/sh/mocks/Shell.go
@@ -0,0 +1,135 @@
+// Code generated by mockery v2.26.1. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+ io "io"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// Shell is an autogenerated mock type for the Shell type
+type Shell struct {
+ mock.Mock
+}
+
+// Copy provides a mock function with given fields: src, dest
+func (_m *Shell) Copy(src string, dest string) error {
+ ret := _m.Called(src, dest)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, string) error); ok {
+ r0 = rf(src, dest)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Exec provides a mock function with given fields: ctx, name, args
+func (_m *Shell) Exec(ctx context.Context, name string, args ...string) error {
+ _va := make([]interface{}, len(args))
+ for _i := range args {
+ _va[_i] = args[_i]
+ }
+ var _ca []interface{}
+ _ca = append(_ca, ctx, name)
+ _ca = append(_ca, _va...)
+ ret := _m.Called(_ca...)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, ...string) error); ok {
+ r0 = rf(ctx, name, args...)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// ExecInOut provides a mock function with given fields: ctx, env, stdin, name, args
+func (_m *Shell) ExecInOut(ctx context.Context, env map[string]string, stdin io.Reader, name string, args ...string) ([]byte, []byte, error) {
+ _va := make([]interface{}, len(args))
+ for _i := range args {
+ _va[_i] = args[_i]
+ }
+ var _ca []interface{}
+ _ca = append(_ca, ctx, env, stdin, name)
+ _ca = append(_ca, _va...)
+ ret := _m.Called(_ca...)
+
+ var r0 []byte
+ var r1 []byte
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, map[string]string, io.Reader, string, ...string) ([]byte, []byte, error)); ok {
+ return rf(ctx, env, stdin, name, args...)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, map[string]string, io.Reader, string, ...string) []byte); ok {
+ r0 = rf(ctx, env, stdin, name, args...)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]byte)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, map[string]string, io.Reader, string, ...string) []byte); ok {
+ r1 = rf(ctx, env, stdin, name, args...)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).([]byte)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, map[string]string, io.Reader, string, ...string) error); ok {
+ r2 = rf(ctx, env, stdin, name, args...)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// Move provides a mock function with given fields: src, dest
+func (_m *Shell) Move(src string, dest string) error {
+ ret := _m.Called(src, dest)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, string) error); ok {
+ r0 = rf(src, dest)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Remove provides a mock function with given fields: fpth
+func (_m *Shell) Remove(fpth string) error {
+ ret := _m.Called(fpth)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(fpth)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+type mockConstructorTestingTNewShell interface {
+ mock.TestingT
+ Cleanup(func())
+}
+
+// NewShell creates a new instance of Shell. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewShell(t mockConstructorTestingTNewShell) *Shell {
+ mock := &Shell{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/internal/utils/sh/sh.go b/internal/utils/sh/sh.go
new file mode 100644
index 0000000..4acdf91
--- /dev/null
+++ b/internal/utils/sh/sh.go
@@ -0,0 +1,47 @@
+package sh
+
+import (
+ "context"
+ "io"
+)
+
+var shell Shell = shx{}
+
+// GetShell .
+func GetShell() Shell {
+ return shell
+}
+
+// Shell .
+type Shell interface {
+ Copy(src, dest string) error
+ Move(src, dest string) error
+ Remove(fpth string) error
+ Exec(ctx context.Context, name string, args ...string) error
+ ExecInOut(ctx context.Context, env map[string]string, stdin io.Reader, name string, args ...string) ([]byte, []byte, error)
+}
+
+// Remove .
+func Remove(fpth string) error {
+ return shell.Remove(fpth)
+}
+
+// Move .
+func Move(src, dest string) error {
+ return shell.Move(src, dest)
+}
+
+// Copy .
+func Copy(src, dest string) error {
+ return shell.Copy(src, dest)
+}
+
+// ExecInOut .
+func ExecInOut(ctx context.Context, env map[string]string, stdin io.Reader, name string, args ...string) ([]byte, []byte, error) {
+ return shell.ExecInOut(ctx, env, stdin, name, args...)
+}
+
+// ExecContext .
+func ExecContext(ctx context.Context, name string, args ...string) error {
+ return shell.Exec(ctx, name, args...)
+}
diff --git a/internal/utils/sh/shx.go b/internal/utils/sh/shx.go
new file mode 100644
index 0000000..bd41e7a
--- /dev/null
+++ b/internal/utils/sh/shx.go
@@ -0,0 +1,66 @@
+package sh
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os/exec"
+
+ "github.com/cockroachdb/errors"
+)
+
+type shx struct{}
+
+func (s shx) Remove(fpth string) error {
+ return s.Exec(context.Background(), "rm", "-rf", fpth)
+}
+
+func (s shx) Move(src, dest string) error {
+ return s.Exec(context.Background(), "mv", src, dest)
+}
+
+func (s shx) Copy(src, dest string) error {
+ return s.Exec(context.Background(), "cp", src, dest)
+}
+
+func (s shx) ExecInOut(ctx context.Context, env map[string]string, stdin io.Reader, name string, args ...string) ([]byte, []byte, error) {
+ var stdout, stderr bytes.Buffer
+
+ cmd := exec.CommandContext(ctx, name, args...)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ cmd.Stdin = stdin
+
+ for k, v := range env {
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ err := cmd.Run()
+
+ return stdout.Bytes(), stderr.Bytes(), err
+}
+
+func (s shx) Exec(ctx context.Context, name string, args ...string) error {
+ var cmd = exec.CommandContext(ctx, name, args...)
+
+ stderr, err := cmd.StderrPipe()
+ if err != nil {
+ return errors.Wrap(err, "failed to get stderr pipe")
+ }
+
+ if err := cmd.Start(); err != nil {
+ return errors.Wrap(err, "failed to start command")
+ }
+
+ slurp, err := io.ReadAll(stderr)
+ if err != nil {
+ return errors.Wrap(err, "failed to read stderr")
+ }
+
+ if err := cmd.Wait(); err != nil {
+ return errors.Wrap(err, string(slurp))
+ }
+
+ return nil
+}
diff --git a/internal/utils/utils.go b/internal/utils/utils.go
new file mode 100644
index 0000000..9a58aaf
--- /dev/null
+++ b/internal/utils/utils.go
@@ -0,0 +1,193 @@
+package utils
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "math"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/gin-gonic/gin"
+ "github.com/google/uuid"
+ "github.com/projecteru2/vmihub/internal/utils/idgen"
+ "golang.org/x/crypto/bcrypt"
+)
+
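+// EncryptPassword hashes the plain-text password with bcrypt.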
+func EncryptPassword(passwd string) (string, error) {
+ // hash the password with bcrypt
+ bs, err := bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost)
+ return string(bs), err
+}
+
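+// EnsureDir creates directory d (and any parents) if it does not already exist.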
+func EnsureDir(d string) error {
+ err := os.MkdirAll(d, 0755)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ return nil
+}
+
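+// GenerateUniqueID returns the hex-encoded SHA-256 of input.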
+func GenerateUniqueID(input string) string {
+ sum := sha256.Sum256([]byte(input))
+ uniqueID := hex.EncodeToString(sum[:])
+ return uniqueID
+}
+
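+// GetBooleanQuery reads the query parameter key, returning true only when it equals "true" and dValue when the parameter is absent.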
+func GetBooleanQuery(c *gin.Context, key string, dValue bool) bool {
+ if value, exists := c.GetQuery(key); exists {
+ return value == "true"
+ }
+ return dValue
+}
+
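+// WithTimeout runs f with a child context that is cancelled after timeout.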
+func WithTimeout(ctx context.Context, timeout time.Duration, f func(ctx2 context.Context)) {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+ f(ctx)
+}
+
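+// GetUniqueStr returns a new UUID encoded as a plain hex string.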
+func GetUniqueStr() (string, error) {
+ id, err := uuid.NewUUID()
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%x", id), nil
+}
+
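+// RandomString returns a random hex string of the given length.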
+func RandomString(length int) string {
+ b := make([]byte, length+2)
+ _, _ = rand.Read(b)
+ return fmt.Sprintf("%x", b)[2 : length+2]
+}
+
+// RadomStringByPhone generates an 8-character random string derived from the user's phone number and the current time
+func RadomStringByPhone(phone string) string {
+ source := fmt.Sprintf("%s%d", phone, time.Now().UnixNano())
+ hash := sha256.New()
+ hash.Write([]byte(source))
+ hashedPhone := hash.Sum(nil)
+ hexString := hex.EncodeToString(hashedPhone)
+ return hexString[:8]
+}
+
+func GetUniqueSID() string {
+ // Generate a snowflake ID.
+ return idgen.NextSID()
+}
+
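+// GetExpiredTimeByPeriod parses a period such as "12h" (hours), "3m" (months) or "1y" (years) and returns the expiry time counted from now.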
+func GetExpiredTimeByPeriod(period string) (*time.Time, error) {
+ timeDuration := ""
+ switch {
+ case strings.HasSuffix(period, "h"):
+ timeDuration = strings.TrimRight(period, "h")
+ case strings.HasSuffix(period, "m"):
+ timeDuration = strings.TrimRight(period, "m")
+ case strings.HasSuffix(period, "y"):
+ timeDuration = strings.TrimRight(period, "y")
+ default:
+ return nil, errors.New("period must end with h, m or y")
+ }
+ timeDurationVal, err := strconv.Atoi(timeDuration)
+ if err != nil || timeDurationVal <= 0 {
+ return nil, errors.New("period must contain a positive integer")
+ }
+ nowTime := time.Now()
+ switch {
+ case strings.HasSuffix(period, "h"):
+ expiredTime := nowTime.Add(time.Duration(timeDurationVal) * time.Hour)
+ return &expiredTime, nil
+ case strings.HasSuffix(period, "m"):
+ expiredTime := nowTime.AddDate(0, timeDurationVal, 0)
+ return &expiredTime, nil
+ default: // "y"
+ expiredTime := nowTime.AddDate(timeDurationVal, 0, 0)
+ return &expiredTime, nil
+ }
+}
+
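+// GetMaxTime returns the sentinel "never expires" timestamp (9999-12-31 UTC).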
+func GetMaxTime() time.Time {
+ maxTime := time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC)
+ return maxTime
+}
+
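+// GetInternalIP returns the first non-loopback IPv4 address of this host.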
+func GetInternalIP() (string, error) {
+ ifaces, err := net.Interfaces()
+ if err != nil {
+ return "", fmt.Errorf("get Interfaces error:%v", err)
+ }
+
+ for _, iface := range ifaces {
+ addrs, err := iface.Addrs()
+ if err != nil {
+ fmt.Println("get Addrs error:", err)
+ continue
+ }
+
+ for _, addr := range addrs {
+ ipNet, ok := addr.(*net.IPNet)
+ if ok && !ipNet.IP.IsLoopback() {
+ if ipNet.IP.To4() != nil {
+ return ipNet.IP.String(), nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("error")
+}
+
+func Invoke(fn func() error) error {
+ return fn()
+}
+
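+// Contains reports whether str is present in sli.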
+func Contains(sli []string, str string) bool {
+ for _, value := range sli {
+ if value == str {
+ return true
+ }
+ }
+ return false
+}
+
+// RoundMoney rounds to 2 decimal places
+func RoundMoney(v float64) float64 {
+ return math.Round(v*math.Pow(10, 2)) / math.Pow(10, 2)
+}
+
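+// UUIDStr returns a new UUID encoded as a hex string.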
+func UUIDStr() (string, error) {
+ uuidRaw, err := uuid.NewUUID()
+ if err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(uuidRaw[:]), nil
+}
+
+// GenRandomBigString generates a random string of uppercase letters with the given length
+func GenRandomBigString(length int) (string, error) {
+ const uppercaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ randomBytes := make([]byte, length)
+ _, err := rand.Read(randomBytes)
+ if err != nil {
+ return "", err
+ }
+ for i := 0; i < length; i++ {
+ randomBytes[i] = uppercaseLetters[int(randomBytes[i])%len(uppercaseLetters)]
+ }
+ return string(randomBytes), nil
+}
+
+// UUIDStrNew returns six random uppercase letters joined by an underscore with the current millisecond timestamp
+func UUIDStrNew() (string, error) {
+ randomStr, err := GenRandomBigString(6)
+ milliSec := time.Now().UnixNano() / int64(time.Millisecond)
+ return fmt.Sprintf("%s_%d", randomStr, milliSec), err
+}
diff --git a/internal/utils/utils_test.go b/internal/utils/utils_test.go
new file mode 100644
index 0000000..5e9ef05
--- /dev/null
+++ b/internal/utils/utils_test.go
@@ -0,0 +1,38 @@
+package utils
+
+import (
+ "testing"
+
+ "github.com/projecteru2/vmihub/internal/utils/idgen"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestUniqueSID(t *testing.T) {
+ seen := make(map[string]bool)
+ for i := 0; i < 10000; i++ {
+ id := idgen.NextSID()
+ _, ok := seen[id]
+ assert.False(t, ok)
+ seen[id] = true
+ }
+}
+
+func TestRoundMoney(t *testing.T) {
+ cases := []struct {
+ input float64
+ res float64
+ }{
+ {
+ 10.1234,
+ 10.12,
+ },
+ {
+ 10.9876,
+ 10.99,
+ },
+ }
+ for _, c := range cases {
+ actual := RoundMoney(c.input)
+ assert.Equal(t, c.res, actual)
+ }
+}
diff --git a/internal/validator/validators.go b/internal/validator/validators.go
new file mode 100644
index 0000000..38f76b6
--- /dev/null
+++ b/internal/validator/validators.go
@@ -0,0 +1,19 @@
+package validator
+
+import (
+ "regexp"
+
+ "github.com/go-playground/validator/v10"
+)
+
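+// ValidateMobile is a validator.FieldLevel check for Chinese mainland mobile numbers.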
+func ValidateMobile(f1 validator.FieldLevel) bool {
+ mobile := f1.Field().String()
+ ok, _ := regexp.MatchString(`^1([38][0-9]|14[579]|5[^4]|16[6]|7[1-35-8]|9[189])\d{8}$`, mobile)
+ return ok
+}
+
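+// ValidateEmail is a validator.FieldLevel check for email addresses.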
+func ValidateEmail(f1 validator.FieldLevel) bool {
+ email := f1.Field().String()
+ ok, _ := regexp.MatchString(`^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$`, email)
+ return ok
+}
diff --git a/internal/version/version.go b/internal/version/version.go
new file mode 100644
index 0000000..758ddbe
--- /dev/null
+++ b/internal/version/version.go
@@ -0,0 +1,28 @@
+package version
+
+import (
+ "fmt"
+ "runtime"
+)
+
+var (
+ // NAME is app name
+ NAME = "vmihub"
+ // VERSION is app version
+ VERSION = "unknown"
+ // REVISION is app revision
+ REVISION = "HEAD"
+ // BUILTAT is the app build time
+ BUILTAT = "now"
+)
+
+// String returns the formatted version information
+func String() string {
+ version := ""
+ version += fmt.Sprintf("Version: %s\n", VERSION)
+ version += fmt.Sprintf("Git hash: %s\n", REVISION)
+ version += fmt.Sprintf("Built: %s\n", BUILTAT)
+ version += fmt.Sprintf("Golang version: %s\n", runtime.Version())
+ version += fmt.Sprintf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
+ return version
+}
diff --git a/pkg/terrors/errors.go b/pkg/terrors/errors.go
new file mode 100644
index 0000000..dfe05aa
--- /dev/null
+++ b/pkg/terrors/errors.go
@@ -0,0 +1,45 @@
+package terrors
+
+import "errors"
+
+var (
+ ErrPlaceholder = errors.New("placeholder")
+ ErrUnauthenticated = errors.New("please login")
+ ErrInvalidStore = errors.New("invalid storage")
+ ErrInvalidUserPass = errors.New("invalid username or password")
+ ErrInvalidDigest = errors.New("invalid digest")
+ ErrCreateWorkload = errors.New("failed to create workload")
+ ErrAllocateDataDisk = errors.New("failed to allocate data disk")
+ ErrConfict = errors.New("conflict")
+ ErrInvalidUserKey = errors.New("invalid username or key")
+ ErrInvalidState = errors.New("guest state is invalid")
+
+ ErrIPAMNoAvailableIP = errors.New("no available IP")
+ ErrIPAMNotReserved = errors.New("IP is not reserved")
+ ErrIPAMAlreadyAllocated = errors.New("IP is already allocated")
+ ErrIPAMInvalidIP = errors.New("invalid IP")
+ ErrIPAMInvalidIndex = errors.New("invalid index")
+
+ ErrRBDBusy = errors.New("rbd or snapshot is busy")
+ ErrRBDDependency = errors.New("rbd or snapshot has dependencies")
+
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenNotValidYet = errors.New("token not active yet")
+ ErrTokenMalformed = errors.New("that's not even a token")
+ ErrTokenInvalid = errors.New("couldn't handle this token")
+
+ ErrNotUploadYet = errors.New("file not upload yet")
+
+ ErrPublicPortNotReserved = errors.New("public port is not reserved")
+ ErrPublicPortAlreadyAllocated = errors.New("public port is already allocated")
+)
+
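+// ErrHTTPResp carries an HTTP status code and message alongside the underlying error.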
+type ErrHTTPResp struct { //nolint
+ Code int
+ Msg string
+ Err error
+}
+
+func (e *ErrHTTPResp) Error() string {
+ return e.Msg
+}
diff --git a/pkg/types/image.go b/pkg/types/image.go
new file mode 100644
index 0000000..07ae136
--- /dev/null
+++ b/pkg/types/image.go
@@ -0,0 +1,146 @@
+package types
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+type JSONResult struct {
+ Code int `json:"code" `
+ Message string `json:"msg"`
+ Data any `json:"data"`
+}
+
+type OSInfo struct {
+ Type string `json:"type" default:"linux"`
+ Distrib string `json:"distrib" default:"ubuntu"`
+ Version string `json:"version"`
+ Arch string `json:"arch" default:"amd64"`
+}
+
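+// String renders the OS info as "distrib:version [arch]" for linux and "type:version [arch]" otherwise.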
+func (info *OSInfo) String() string {
+ ty := strings.ToLower(info.Type)
+ switch ty {
+ case "linux":
+ return fmt.Sprintf("%s:%s [%s]", info.Distrib, info.Version, info.Arch)
+ default:
+ return fmt.Sprintf("%s:%s [%s]", info.Type, info.Version, info.Arch)
+ }
+}
+
+type ImageCreateRequest struct {
+ Username string `json:"username"`
+ Name string `json:"name"`
+ Tag string `json:"tag" default:"latest"`
+ Labels map[string]string `json:"labels"`
+ Size int64 `json:"size"`
+ Private bool `json:"private" default:"false"`
+ Digest string `json:"digest"`
+ Format string `json:"format"`
+ OS OSInfo `json:"os"`
+ Description string `json:"description"`
+ URL string `json:"url"`
+ RegionCode string `json:"region_code" default:"ap-yichang-1"`
+}
+
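+// Check validates the image create request: digest, OS fields and format must be present and well formed.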
+func (req *ImageCreateRequest) Check() error {
+ if req.URL == "" && len(req.Digest) != 64 {
+ return errors.New("invalid digest, only accept sha256")
+ }
+ if req.OS.Type == "" {
+ return errors.New("os type is empty")
+ }
+ req.OS.Type = strings.ToLower(req.OS.Type)
+ if req.OS.Type == "linux" && req.OS.Distrib == "" {
+ return errors.New("os distrib is empty")
+ }
+ if req.OS.Arch == "" {
+ return errors.New("os arch is empty")
+ }
+ if req.Format == "" {
+ return errors.New("format is empty")
+ }
+ return nil
+}
+
+type ImageInfoRequest struct {
+ Username string
+ ImgName string
+ Perm string
+ RegionCode string
+}
+
+type ImagesByUsernameRequest struct {
+ Username string
+ Keyword string
+ PageNum int
+ PageSize int
+ RegionCode string
+}
+
+type ImageInfoResp struct {
+ ID int64 `json:"id"`
+ RepoID int64 `json:"repo_id"`
+ Username string `json:"username"`
+ Name string `json:"name"`
+ Tag string `json:"tag" description:"image tag, default:latest"`
+ Format string `json:"format"`
+ OS OSInfo `json:"os"`
+ Private bool `json:"private"`
+ Size int64 `json:"size"`
+ Digest string `json:"digest" description:"image digest"`
+ Snapshot string `json:"snapshot"`
+ Description string `json:"description" description:"image description"`
+ CreatedAt time.Time `json:"createdAt,omitempty" description:"image create time" example:"format: RFC3339"`
+ UpdatedAt time.Time `json:"updatedAt,omitempty" description:"image update time" example:"format: RFC3339"`
+}
+
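+// Fullname returns "username/name:tag", omitting the username part for the anonymous user "_".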
+func (img *ImageInfoResp) Fullname() string {
+ if img.Username == "" || img.Username == "_" {
+ return fmt.Sprintf("%s:%s", img.Name, img.Tag)
+ }
+ return fmt.Sprintf("%s/%s:%s", img.Username, img.Name, img.Tag)
+}
+
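+// RBDName converts the full image name into an RBD-safe name by replacing "/" with "." and ":" with "-".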
+func (img *ImageInfoResp) RBDName() string {
+ name := strings.ReplaceAll(img.Fullname(), "/", ".")
+ return strings.ReplaceAll(name, ":", "-")
+}
+
+type ImageListResp struct {
+}
+
+type ContextKey string
+
+type ImageTreeNode struct {
+ Name string `json:"name"`
+ Image string `json:"image,omitempty"`
+ Children []*ImageTreeNode `json:"children"`
+ ChildMap map[string]*ImageTreeNode `json:"-"`
+}
+type ImageListTreeResp struct {
+ Images []*ImageTreeNode `json:"images"`
+}
+
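+// Refine recursively flattens each node's ChildMap into its Children slice.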
+func (resp *ImageListTreeResp) Refine() {
+ var handleTreeNode func(*ImageTreeNode)
+ handleTreeNode = func(node *ImageTreeNode) {
+ if len(node.ChildMap) == 0 {
+ return
+ }
+ for _, child := range node.ChildMap {
+ handleTreeNode(child)
+ node.Children = append(node.Children, child)
+ }
+ }
+ for _, node := range resp.Images {
+ handleTreeNode(node)
+ }
+}
+
+type ImageLabel struct {
+ GPUTplList []string `json:"gputpl_list,omitempty"`
+}
diff --git a/pkg/types/user.go b/pkg/types/user.go
new file mode 100644
index 0000000..c764fec
--- /dev/null
+++ b/pkg/types/user.go
@@ -0,0 +1,87 @@
+package types
+
+import (
+ "time"
+
+ "github.com/cockroachdb/errors"
+)
+
+// RegisterRequest defines the user registration request body
+type RegisterRequest struct {
+ Username string `json:"username"`
+ Password string `json:"password" binding:"required,min=3,max=20"`
+ Email string `json:"email"`
+ SMSID string `json:"smsId" binding:"required"`
+ Code string `json:"code" binding:"required,len=6"`
+ Nickname string `json:"nickname"`
+}
+
+type UpdateUserRequest struct {
+ Email string `json:"email"`
+ Nickname string `json:"nickname"`
+}
+
+// ChangeUserPwdRequest defines the change-password request body
+type ChangeUserPwdRequest struct {
+ NewPassword string `json:"newPassword" binding:"required,min=3,max=20"`
+}
+
+type ResetUserPwdRequest struct {
+ Phone string `json:"phone" binding:"required"`
+ Password string `json:"password" binding:"required,min=3,max=20"`
+ Password1 string `json:"password1" binding:"required,min=3,max=20"`
+ SMSID string `json:"smsId" binding:"required"`
+ Code string `json:"code" binding:"required"`
+}
+
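+// Check verifies that the two supplied passwords match.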
+func (req *ResetUserPwdRequest) Check() error {
+ if req.Password != req.Password1 {
+ return errors.New("password not match")
+ }
+ return nil
+}
+
+// LoginRequest defines the user login request body
+type LoginRequest struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+}
+
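+// Check verifies that username and password are both supplied.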
+func (req *LoginRequest) Check() error {
+ if req.Password == "" {
+ return errors.New("password should not both be empty")
+ }
+ if req.Username == "" {
+ return errors.New("username should not be empty")
+ }
+ return nil
+}
+
+// RefreshRequest defines the token refresh request body
+type RefreshRequest struct {
+ AccessToken string `json:"accessToken" binding:"required"`
+ RefreshToken string `json:"refreshToken" binding:"required"`
+}
+
+type UserInfoResp struct {
+ ID int64 `json:"id"`
+ Username string `json:"username" binding:"required,min=1,max=20"`
+ Nickname string `json:"nickname" binding:"required,min=1,max=20"`
+ Email string `json:"email" binding:"required,email"`
+ IsAdmin bool `json:"isAdmin"`
+ Type string `json:"type"`
+}
+
+type TokenResponse struct {
+ AccessToken string `json:"accessToken"`
+ RefreshToken string `json:"refreshToken"`
+}
+
+type PrivateTokenRequest struct {
+ Name string `json:"name" binding:"required,min=1,max=20" example:"my-token"`
+ ExpiredAt time.Time `json:"expiredAt" example:"RFC3339: 2023-11-30T14:30:00.123+08:00"`
+}
+
+type PrivateTokenDeleteRequest struct {
+ Name string `json:"name" binding:"required,min=1,max=20"`
+}
diff --git a/pkg/utils/digest.go b/pkg/utils/digest.go
new file mode 100644
index 0000000..cf503c9
--- /dev/null
+++ b/pkg/utils/digest.go
@@ -0,0 +1,101 @@
+package utils
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "mime/multipart"
+ "os"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/projecteru2/vmihub/client/terrors"
+)
+
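+// Cached reports whether the file at filePath already has the given sha256 digest (an optional "sha256:" prefix is ignored).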
+func Cached(digest, filePath string) (ans bool, err error) {
+ parts := strings.Split(digest, ":")
+ if len(parts) == 2 {
+ digest = parts[1]
+ }
+ localHash, err := CalcDigestOfFile(filePath)
+ if err != nil {
+ return
+ }
+ return digest == localHash, nil
+}
+
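+// CalcDigestOfFile returns the hex-encoded sha256 of the file contents.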
+func CalcDigestOfFile(fname string) (string, error) {
+ f, err := os.Open(fname)
+ if err != nil {
+ return "", err
+ }
+
+ defer f.Close()
+
+ h := sha256.New()
+
+ if _, err = io.Copy(h, f); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
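+// CalcDigestOfStr returns the hex-encoded sha256 of the string.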
+func CalcDigestOfStr(ss string) (string, error) {
+ h := sha256.New()
+ _, err := h.Write([]byte(ss))
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
+// GetImageDigest computes the SHA-256 digest of an uploaded file
+func GetImageDigest(file multipart.File) (string, error) {
+ // create a SHA-256 hash object
+ h := sha256.New()
+
+ // stream the file content into the hash
+ if _, err := io.Copy(h, file); err != nil {
+ return "", err
+ }
+
+ // compute the final hash value
+ sum := h.Sum(nil)
+
+ // encode the hash as a hex string
+ digest := fmt.Sprintf("%x", sum)
+ return digest, nil
+}
+
+// PartRight partitions the str by the sep.
+func PartRight(str, sep string) (string, string) {
+ switch i := strings.LastIndex(str, sep); {
+ case i < 0:
+ return "", str
+ case i >= len(str)-1:
+ return str[:i], ""
+ default:
+ return str[:i], str[i+1:]
+ }
+}
+
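+// ParseImageName splits "user/name:tag" into its parts, defaulting user to "_" and tag to "latest".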
+func ParseImageName(imgName string) (user, name, tag string, err error) {
+ var nameTag string
+ user, nameTag = PartRight(imgName, "/")
+ idx := strings.Index(nameTag, ":")
+ if idx < 0 {
+ name = nameTag
+ } else {
+ name, tag = nameTag[:idx], nameTag[idx+1:]
+ }
+ if tag == "" {
+ tag = "latest"
+ }
+ if user == "" {
+ user = "_"
+ }
+ if strings.Contains(tag, ":") {
+ err = errors.Wrapf(terrors.ErrInvalidImageName, "invalid tag %s", tag)
+ }
+ return
+}
diff --git a/pkg/utils/digest_test.go b/pkg/utils/digest_test.go
new file mode 100644
index 0000000..1c0ccc6
--- /dev/null
+++ b/pkg/utils/digest_test.go
@@ -0,0 +1,53 @@
+package utils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetImageDigest(t *testing.T) {
+ // create a temporary test file
+ fileContent := []byte("test file content")
+ tmpfile, err := os.CreateTemp("", "test*.txt")
+ assert.Nil(t, err)
+
+ defer os.Remove(tmpfile.Name())
+
+ _, err = tmpfile.Write(fileContent)
+ assert.Nil(t, err)
+
+ err = tmpfile.Close()
+ assert.Nil(t, err)
+
+ // compute the expected SHA-256 hash of the file content
+ expectedDigest := sha256.Sum256(fileContent)
+ file, err := os.Open(tmpfile.Name())
+ assert.Nil(t, err)
+ defer file.Close()
+
+ digest, err := GetImageDigest(file)
+ assert.Nil(t, err)
+
+ // Compare the calculated hash value with the expected hash value
+ if digest != fmt.Sprintf("%x", expectedDigest) {
+ t.Errorf("Expected digest %x, but got %s", expectedDigest, digest)
+ }
+}
+
+func TestDigest(t *testing.T) {
+ ss := "hello world"
+ h := sha256.New()
+ _, err := h.Write([]byte(ss))
+ assert.Nil(t, err)
+ res1 := hex.EncodeToString(h.Sum(nil))
+
+ res2, err := CalcDigestOfStr(ss)
+ assert.Nil(t, err)
+ assert.Equal(t, res1, res2)
+}
diff --git a/spec.yaml b/spec.yaml
new file mode 100644
index 0000000..bfb5ed9
--- /dev/null
+++ b/spec.yaml
@@ -0,0 +1,54 @@
+appname: "vmihub"
+entrypoints:
+ main:
+ cmd: "/usr/local/bin/vmihub --config /etc/eru/vmihub.toml"
+ restart: always
+ publish:
+ - "8080"
+ healthcheck:
+ http_port: "8080"
+ url: "/healthz"
+ code: 200
+ privileged: true
+ # log:
+ # type: "none"
+# volumes:
+# - /sys:/sys:ro
+dns:
+ - 8.8.8.8
+
+stages:
+ - build
+ - pack
+builds:
+ build:
+ base: "golang:bookworm"
+ # only the ssh protocol is supported
+ repo: "git@github.com:projecteru2/vmihub.git"
+ version: "HEAD"
+ dir: /go/src/github.com/projecteru2/vmihub
+ commands:
+ - sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list.d/debian.sources
+ - apt-get update
+ - apt-get install -y libcephfs-dev librbd-dev librados-dev
+ - make deps CN=1
+ - make build CN=1
+ - ./bin/vmihub --version
+ cache:
+ /go/src/github.com/projecteru2/vmihub/bin/vmihub: /usr/local/bin/vmihub
+ pack:
+ base: debian:bookworm
+ labels:
+ ERU: 1
+ version: latest
+ app: vmihub
+ app_entry: vmihub_main
+ # envs:
+ # AGENT_IN_DOCKER: 1
+ commands:
+ - sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list.d/debian.sources
+ - apt-get update
+ - apt-get install -y ca-certificates libcephfs-dev librbd-dev librados-dev genisoimage qemu-utils
+ - update-ca-certificates
+ - mkdir -p /etc/eru/
+ - mkdir -p /etc/ceph